# Dataset dump, one row per source file. Columns:
# repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
# aevri/mel | mel/cmddebug/meldebug.py | apache-2.0
"""Mel Debug - for debugging the internals of the 'mel' command."""
import argparse
import sys
import mel.cmd.error
import mel.cmddebug.benchautomark
import mel.cmddebug.genrepo
import mel.cmddebug.rendervaluefield
import mel.lib.ui
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__,
)
subparsers = parser.add_subparsers()
# Work around a bug in argparse with subparsers no longer being required:
# http://bugs.python.org/issue9253#msg186387
subparsers.required = True
subparsers.dest = "command"
# vulture will report these as unused unless we do this
#
# pylint: disable=pointless-statement
subparsers.required
subparsers.dest
# pylint: enable=pointless-statement
_setup_parser_for_module(
subparsers, mel.cmddebug.benchautomark, "bench-automark"
)
_setup_parser_for_module(subparsers, mel.cmddebug.genrepo, "gen-repo")
_setup_parser_for_module(
subparsers, mel.cmddebug.rendervaluefield, "render-valuefield"
)
args = parser.parse_args()
try:
return args.func(args)
except mel.cmd.error.UsageError as e:
print("Usage error:", e, file=sys.stderr)
return 2
except BrokenPipeError:
# Silently exit on broken pipes, e.g. when our output is piped to head.
# Explicitly close stderr before exiting, to avoid an additional
# message from Python on stderr about the pipe break being ignored.
# http://bugs.python.org/issue11380,#msg153320
sys.stderr.close()
except mel.lib.ui.AbortKeyInterruptError:
# Using this return code may also break us out of an outer loop, e.g.
        # 'xargs' will stop processing if the program it calls exits with 255.
return 255
def _setup_parser_for_module(subparsers, module, name):
doc = module.__doc__
doc_subject = doc.splitlines()[0]
doc_epilog = "\n".join(doc.splitlines()[1:])
parser = subparsers.add_parser(
name,
formatter_class=argparse.RawDescriptionHelpFormatter,
help=doc_subject,
description=doc_subject,
epilog=doc_epilog,
)
module.setup_parser(parser)
parser.set_defaults(func=module.process_args)
if __name__ == "__main__":
sys.exit(main())
# -----------------------------------------------------------------------------
# Copyright (C) 2016-2018 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
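# Editor's sketch (not part of the original module): the "required subcommand"
# workaround used in main() above, reduced to a minimal standalone argparse
# example. The "hello" command is invented for illustration.
import argparse
def _subcommand_demo(argv=None):
    parser = argparse.ArgumentParser(description="demo of a required subcommand")
    subparsers = parser.add_subparsers()
    # Same workaround as above: on Python 3.3+ subcommands are optional by
    # default, so an empty command line would otherwise fail later with a
    # missing 'func' attribute instead of a clear usage error.
    subparsers.required = True
    subparsers.dest = "command"
    hello = subparsers.add_parser("hello", help="print a greeting")
    hello.set_defaults(func=lambda args: print("hello"))
    args = parser.parse_args(argv)
    return args.func(args)
# _subcommand_demo(["hello"]) prints "hello"; _subcommand_demo([]) exits with
# a usage error because the subcommand is required.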
# FarnazH/horton | horton/__init__.py | gpl-3.0
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''The main HORTON Package'''
__version__ = '2.1.0'
# Extensions are imported first to call fpufix as early as possible
from horton.cext import *
from horton.cache import *
from horton.constants import *
from horton.context import *
from horton.part import *
from horton.espfit import *
from horton.exceptions import *
from horton.gbasis import *
from horton.grid import *
from horton.io import *
from horton.log import *
from horton.meanfield import *
from horton.moments import *
from horton.periodic import *
from horton.quadprog import *
from horton.units import *
from horton.utils import *
from horton.modelhamiltonians import *
# tgillet1/PASTA | tree.py | mit
from sequence import NeuriteSequence
class TreeIndexLogic():
'''
A class which captures tree-specific logic given two characters.
@param char1: First character
@param char2: Second character
'''
def __init__(self, char1, char2):
self.char1 = char1
self.char2 = char2
def get(self):
if (self.char1 == 'A' and self.char2 == 'C') or (self.char2 == 'A' and self.char1 == 'C'):
return 'A'
if (self.char1 == 'A' and self.char2 == '-') or (self.char2 == 'A' and self.char1 == '-'):
return 'A'
if (self.char1 == 'T' and self.char2 == '-') or (self.char2 == 'T' and self.char1 == '-'):
return 'T'
if (self.char1 == 'C' and self.char2 == '-') or (self.char2 == 'C' and self.char1 == '-'):
return 'C'
raise Exception("Improper character alignment: "+self.char1+" with "+self.char2)
class TreeLogicFactory():
'''
Parses and processes the composite string to ultimately yield a single
string which encapsulate the pairwise alignment.
'''
def __init__(self, str1, str2):
self.str1 = str1
self.str2 = str2
def get_alignment(self):
'''
Simple function to merge two strings and produce a composite.
@return: NeuriteSequence object representing the composite sequence.
'''
composite = ''
for idx, char1 in enumerate(self.str1):
char2 = self.str2[idx]
if char1 == self.str2[idx]:
composite += char1
else:
# Apply neuronal logic given two specific characters.
composite += TreeIndexLogic(char1, char2).get()
return composite
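# Editor's sketch (not part of the original file): exercising the two classes
# above on a pair of short, made-up alignment strings; assumes the module's
# own `sequence` import resolves in the surrounding project.
def _composite_demo():
    left = "ACT-A"
    right = "AC-TA"
    # Matching positions are copied through; each mismatch is resolved by
    # TreeIndexLogic, e.g. ('T', '-') -> 'T' and ('-', 'T') -> 'T'.
    return TreeLogicFactory(left, right).get_alignment() # -> "ACTTA"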
# zak-k/iris | docs/iris/example_code/General/cross_section.py | gpl-3.0
"""
Cross section plots
===================
This example demonstrates contour plots of a cross-sectioned multi-dimensional
cube which features a hybrid height vertical coordinate system.
"""
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Enable a future option, to ensure that the netcdf load works the same way
# as in future Iris versions.
iris.FUTURE.netcdf_promote = True
# Load some test data.
fname = iris.sample_data_path('hybrid_height.nc')
theta = iris.load_cube(fname, 'air_potential_temperature')
# Extract a single height vs longitude cross-section. N.B. This could
# easily be changed to extract a specific slice, or even to loop over *all*
# cross section slices.
cross_section = next(theta.slices(['grid_longitude',
'model_level_number']))
qplt.contourf(cross_section, coords=['grid_longitude', 'altitude'],
cmap='RdBu_r')
iplt.show()
# Now do the equivalent plot, only against model level
plt.figure()
qplt.contourf(cross_section,
coords=['grid_longitude', 'model_level_number'],
cmap='RdBu_r')
iplt.show()
if __name__ == '__main__':
main()
# calexil/FightstickDisplay | pyglet/event.py | gpl-3.0
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Event dispatch framework.
All objects that produce events in pyglet implement :py:class:`~pyglet.event.EventDispatcher`,
providing a consistent interface for registering and manipulating event
handlers. A commonly used event dispatcher is `pyglet.window.Window`.
Event types
===========
For each event dispatcher there is a set of events that it dispatches; these
correspond with the type of event handlers you can attach. Event types are
identified by their name, for example, ''on_resize''. If you are creating a
new class which implements :py:class:`~pyglet.event.EventDispatcher`, you must call
`EventDispatcher.register_event_type` for each event type.
Attaching event handlers
========================
An event handler is simply a function or method. You can attach an event
handler by setting the appropriate function on the instance::
def on_resize(width, height):
# ...
dispatcher.on_resize = on_resize
There is also a convenience decorator that reduces typing::
@dispatcher.event
def on_resize(width, height):
# ...
You may prefer to subclass and override the event handlers instead::
class MyDispatcher(DispatcherClass):
def on_resize(self, width, height):
# ...
Event handler stack
===================
When attaching an event handler to a dispatcher using the above methods, it
replaces any existing handler (causing the original handler to no longer be
called). Each dispatcher maintains a stack of event handlers, allowing you to
insert an event handler "above" the existing one rather than replacing it.
There are two main use cases for "pushing" event handlers:
* Temporarily intercepting the events coming from the dispatcher by pushing a
custom set of handlers onto the dispatcher, then later "popping" them all
off at once.
* Creating "chains" of event handlers, where the event propagates from the
top-most (most recently added) handler to the bottom, until a handler
takes care of it.
Use `EventDispatcher.push_handlers` to create a new level in the stack and
attach handlers to it. You can push several handlers at once::
dispatcher.push_handlers(on_resize, on_key_press)
If your function handlers have different names to the events they handle, use
keyword arguments::
dispatcher.push_handlers(on_resize=my_resize, on_key_press=my_key_press)
After an event handler has processed an event, it is passed on to the
next-lowest event handler, unless the handler returns `EVENT_HANDLED`, which
prevents further propagation.
To remove all handlers on the top stack level, use
`EventDispatcher.pop_handlers`.
Note that any handlers pushed onto the stack have precedence over the
handlers set directly on the instance (for example, using the methods
described in the previous section), regardless of when they were set.
For example, handler ``foo`` is called before handler ``bar`` in the following
example::
dispatcher.push_handlers(on_resize=foo)
dispatcher.on_resize = bar
Dispatching events
==================
pyglet uses a single-threaded model for all application code. Event
handlers are only ever invoked as a result of calling
`EventDispatcher.dispatch_events`.
It is up to the specific event dispatcher to queue relevant events until they
can be dispatched, at which point the handlers are called in the order the
events were originally generated.
This implies that your application runs with a main loop that continuously
updates the application state and checks for new events::
while True:
dispatcher.dispatch_events()
# ... additional per-frame processing
Not all event dispatchers require the call to ``dispatch_events``; check with
the particular class documentation.
.. note::
In order to prevent issues with garbage collection, the
:py:class:`~pyglet.event.EventDispatcher` class only holds weak
references to pushed event handlers. That means the following example
will not work, because the pushed object will fall out of scope and be
collected::
dispatcher.push_handlers(MyHandlerClass())
Instead, you must make sure to keep a reference to the object before pushing
it. For example::
my_handler_instance = MyHandlerClass()
dispatcher.push_handlers(my_handler_instance)
"""
import inspect
from functools import partial
from weakref import WeakMethod
EVENT_HANDLED = True
EVENT_UNHANDLED = None
class EventException(Exception):
"""An exception raised when an event handler could not be attached.
"""
pass
class EventDispatcher:
"""Generic event dispatcher interface.
See the module docstring for usage.
"""
# Placeholder empty stack; real stack is created only if needed
_event_stack = ()
@classmethod
def register_event_type(cls, name):
"""Register an event type with the dispatcher.
Registering event types allows the dispatcher to validate event
handler names as they are attached, and to search attached objects for
suitable handlers.
:Parameters:
`name` : str
Name of the event to register.
"""
if not hasattr(cls, 'event_types'):
cls.event_types = []
cls.event_types.append(name)
return name
def push_handlers(self, *args, **kwargs):
"""Push a level onto the top of the handler stack, then attach zero or
more event handlers.
If keyword arguments are given, they name the event type to attach.
Otherwise, a callable's `__name__` attribute will be used. Any other
object may also be specified, in which case it will be searched for
callables with event names.
"""
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = []
# Place dict full of new handlers at beginning of stack
self._event_stack.insert(0, {})
self.set_handlers(*args, **kwargs)
def _get_handlers(self, args, kwargs):
"""Implement handler matching on arguments for set_handlers and
remove_handlers.
"""
for obj in args:
if inspect.isroutine(obj):
# Single magically named function
name = obj.__name__
if name not in self.event_types:
raise EventException('Unknown event "%s"' % name)
if inspect.ismethod(obj):
yield name, WeakMethod(obj, partial(self._remove_handler, name))
else:
yield name, obj
else:
# Single instance with magically named methods
for name in dir(obj):
if name in self.event_types:
meth = getattr(obj, name)
yield name, WeakMethod(meth, partial(self._remove_handler, name))
for name, handler in kwargs.items():
# Function for handling given event (no magic)
if name not in self.event_types:
raise EventException('Unknown event "%s"' % name)
if inspect.ismethod(handler):
yield name, WeakMethod(handler, partial(self._remove_handler, name))
else:
yield name, handler
def set_handlers(self, *args, **kwargs):
"""Attach one or more event handlers to the top level of the handler
stack.
See :py:meth:`~pyglet.event.EventDispatcher.push_handlers` for the accepted argument types.
"""
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = [{}]
for name, handler in self._get_handlers(args, kwargs):
self.set_handler(name, handler)
def set_handler(self, name, handler):
"""Attach a single event handler.
:Parameters:
`name` : str
Name of the event type to attach to.
`handler` : callable
Event handler to attach.
"""
# Create event stack if necessary
if type(self._event_stack) is tuple:
self._event_stack = [{}]
self._event_stack[0][name] = handler
def pop_handlers(self):
"""Pop the top level of event handlers off the stack.
"""
assert self._event_stack and 'No handlers pushed'
del self._event_stack[0]
def remove_handlers(self, *args, **kwargs):
"""Remove event handlers from the event stack.
See :py:meth:`~pyglet.event.EventDispatcher.push_handlers` for the
accepted argument types. All handlers are removed from the first stack
frame that contains any of the given handlers. No error is raised if
any handler does not appear in that frame, or if no stack frame
contains any of the given handlers.
If the stack frame is empty after removing the handlers, it is
removed from the stack. Note that this interferes with the expected
symmetry of :py:meth:`~pyglet.event.EventDispatcher.push_handlers` and
:py:meth:`~pyglet.event.EventDispatcher.pop_handlers`.
"""
handlers = list(self._get_handlers(args, kwargs))
# Find the first stack frame containing any of the handlers
def find_frame():
for frame in self._event_stack:
for name, handler in handlers:
try:
if frame[name] == handler:
return frame
except KeyError:
pass
frame = find_frame()
# No frame matched; no error.
if not frame:
return
# Remove each handler from the frame.
for name, handler in handlers:
try:
if frame[name] == handler:
del frame[name]
except KeyError:
pass
# Remove the frame if it's empty.
if not frame:
self._event_stack.remove(frame)
def remove_handler(self, name, handler):
"""Remove a single event handler.
The given event handler is removed from the first handler stack frame
it appears in. The handler must be the exact same callable as passed
to `set_handler`, `set_handlers` or
:py:meth:`~pyglet.event.EventDispatcher.push_handlers`; and the name
must match the event type it is bound to.
No error is raised if the event handler is not set.
:Parameters:
`name` : str
Name of the event type to remove.
`handler` : callable
Event handler to remove.
"""
for frame in self._event_stack:
try:
if frame[name] == handler:
del frame[name]
break
except KeyError:
pass
def _remove_handler(self, name, handler):
"""Used internally to remove all handler instances for the given event name.
This is normally called from a dead ``WeakMethod`` to remove itself from the
event stack.
"""
# Iterate over a copy as we might mutate the list
for frame in list(self._event_stack):
if name in frame:
try:
if frame[name] == handler:
del frame[name]
if not frame:
self._event_stack.remove(frame)
except TypeError:
# weakref is already dead
pass
def dispatch_event(self, event_type, *args):
"""Dispatch a single event to the attached handlers.
        The event is propagated to all handlers from the top of the stack
until one returns `EVENT_HANDLED`. This method should be used only by
:py:class:`~pyglet.event.EventDispatcher` implementors; applications should call
the ``dispatch_events`` method.
Since pyglet 1.2, the method returns `EVENT_HANDLED` if an event
handler returned `EVENT_HANDLED` or `EVENT_UNHANDLED` if all events
returned `EVENT_UNHANDLED`. If no matching event handlers are in the
stack, ``False`` is returned.
:Parameters:
`event_type` : str
Name of the event.
`args` : sequence
Arguments to pass to the event handler.
:rtype: bool or None
:return: (Since pyglet 1.2) `EVENT_HANDLED` if an event handler
returned `EVENT_HANDLED`; `EVENT_UNHANDLED` if one or more event
handlers were invoked but returned only `EVENT_UNHANDLED`;
otherwise ``False``. In pyglet 1.1 and earlier, the return value
is always ``None``.
"""
assert hasattr(self, 'event_types'), (
"No events registered on this EventDispatcher. "
"You need to register events with the class method "
"EventDispatcher.register_event_type('event_name')."
)
assert event_type in self.event_types,\
"%r not found in %r.event_types == %r" % (event_type, self, self.event_types)
invoked = False
# Search handler stack for matching event handlers
for frame in list(self._event_stack):
handler = frame.get(event_type, None)
if not handler:
continue
if isinstance(handler, WeakMethod):
handler = handler()
assert handler is not None
try:
invoked = True
if handler(*args):
return EVENT_HANDLED
except TypeError as exception:
self._raise_dispatch_exception(event_type, args, handler, exception)
# Check instance for an event handler
try:
if getattr(self, event_type)(*args):
return EVENT_HANDLED
except AttributeError as e:
event_op = getattr(self, event_type, None)
if callable(event_op):
raise e
except TypeError as exception:
self._raise_dispatch_exception(event_type, args, getattr(self, event_type), exception)
else:
invoked = True
if invoked:
return EVENT_UNHANDLED
return False
def _raise_dispatch_exception(self, event_type, args, handler, exception):
# A common problem in applications is having the wrong number of
# arguments in an event handler. This is caught as a TypeError in
# dispatch_event but the error message is obfuscated.
#
# Here we check if there is indeed a mismatch in argument count,
# and construct a more useful exception message if so. If this method
# doesn't find a problem with the number of arguments, the error
# is re-raised as if we weren't here.
n_args = len(args)
# Inspect the handler
argspecs = inspect.getfullargspec(handler)
handler_args = argspecs.args
handler_varargs = argspecs.varargs
handler_defaults = argspecs.defaults
n_handler_args = len(handler_args)
# Remove "self" arg from handler if it's a bound method
if inspect.ismethod(handler) and handler.__self__:
n_handler_args -= 1
# Allow *args varargs to overspecify arguments
if handler_varargs:
n_handler_args = max(n_handler_args, n_args)
# Allow default values to overspecify arguments
if (n_handler_args > n_args and handler_defaults and
n_handler_args - len(handler_defaults) <= n_args):
n_handler_args = n_args
if n_handler_args != n_args:
if inspect.isfunction(handler) or inspect.ismethod(handler):
descr = "'%s' at %s:%d" % (handler.__name__,
handler.__code__.co_filename,
handler.__code__.co_firstlineno)
else:
descr = repr(handler)
raise TypeError("The '{0}' event was dispatched with {1} arguments, "
"but your handler {2} accepts only {3} arguments.".format(
event_type, len(args), descr, len(handler_args)))
else:
raise exception
def event(self, *args):
"""Function decorator for an event handler.
Usage::
win = window.Window()
@win.event
def on_resize(self, width, height):
# ...
or::
@win.event('on_resize')
def foo(self, width, height):
# ...
"""
if len(args) == 0: # @window.event()
def decorator(func):
name = func.__name__
self.set_handler(name, func)
return func
return decorator
elif inspect.isroutine(args[0]): # @window.event
func = args[0]
name = func.__name__
self.set_handler(name, func)
return args[0]
elif isinstance(args[0], str): # @window.event('on_resize')
name = args[0]
def decorator(func):
self.set_handler(name, func)
return func
return decorator
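# Editor's sketch (not part of pyglet): the usage pattern described in the
# module docstring, shown end to end. The Clock class and the on_tick event
# are invented for illustration.
if __name__ == '__main__':
    class Clock(EventDispatcher):
        def tick(self):
            return self.dispatch_event('on_tick', 42)
    Clock.register_event_type('on_tick')
    clock = Clock()
    @clock.event
    def on_tick(value):
        print('tick', value)
        return EVENT_HANDLED
    # Prints "tick 42"; dispatch_event returns EVENT_HANDLED because the
    # handler above claimed the event.
    clock.tick()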
# Svalorzen/AI-Toolbox | test/Python/POMDP/GapMinTests.py | gpl-3.0
import unittest
import sys
import os
sys.path.append(os.getcwd())
from AIToolbox import POMDP
class POMDPPythonGapMin(unittest.TestCase):
def chengD35(self):
# Actions are: 0-listen, 1-open-left, 2-open-right
S = 3
A = 3
O = 3
model = POMDP.Model(O, S, A)
# SAS form
t = [0,0,0]
r = [0,0,0]
# SAO form
o = [0,0,0]
t[0] = [
[0.445, 0.222, 0.333],
[0.234, 0.064, 0.702],
[0.535, 0.313, 0.152],
]
t[1] = [
[0.500, 0.173, 0.327],
[0.549, 0.218, 0.233],
[0.114, 0.870, 0.016],
]
t[2] = [
[0.204, 0.553, 0.243],
[0.061, 0.466, 0.473],
[0.325, 0.360, 0.315],
]
o[0] = [
[0.686, 0.182, 0.132],
[0.698, 0.131, 0.171],
[0.567, 0.234, 0.199],
]
o[1] = [
[0.138, 0.786, 0.076],
[0.283, 0.624, 0.093],
[0.243, 0.641, 0.116],
]
o[2] = [
[0.279, 0.083, 0.638],
[0.005, 0.202, 0.793],
[0.186, 0.044, 0.770],
]
r[0] = [
[5.2] * 3,
[0.8] * 3,
[9.0] * 3,
]
r[1] = [
[4.6] * 3,
[6.8] * 3,
[9.3] * 3,
]
r[2] = [
[4.1] * 3,
[6.9] * 3,
[0.8] * 3,
]
model.setTransitionFunction(t)
model.setRewardFunction(r)
model.setObservationFunction(o)
model.setDiscount(0.999)
return model
def test_solver(self):
gm = POMDP.GapMin(0.005, 3);
model = self.chengD35();
initialBelief = [1.0/3, 1.0/3, 1.0/3]
lb, ub, vlist, qfun = gm(model, initialBelief);
self.assertTrue(9.0 < ub - lb and ub - lb < 11.0);
if __name__ == '__main__':
unittest.main(verbosity=2)
# Lamecarlate/gourmet | gourmet/importers/html_importer.py | gpl-2.0
import urllib, re, tempfile, os.path
import importer
import BeautifulSoup
import socket
from gourmet.gdebug import debug
from gettext import gettext as _
import traceback
DEFAULT_SOCKET_TIMEOUT=45.0
URLOPEN_SOCKET_TIMEOUT=15.0
socket.setdefaulttimeout(DEFAULT_SOCKET_TIMEOUT)
# To add additional HTML import sites, see html_rules.py
def read_socket_w_progress (socket, progress, message):
"""Read piecemeal reporting progress as we go."""
if not progress: data = socket.read()
else:
bs = 1024 * 8
if hasattr(socket,'headers'):
fs = int(socket.headers.get('content-length',-1))
else: fs = -1
block = socket.read(bs)
data = block
sofar = bs
while block:
if fs>0: progress(float(sofar)/fs, message)
else: progress(-1, message)
sofar += bs
block = socket.read(bs)
data += block
socket.close()
return data
def get_url (url, progress):
"""Return data from URL, possibly displaying progress."""
if type(url)==str:
socket.setdefaulttimeout(URLOPEN_SOCKET_TIMEOUT)
sock = urllib.urlopen(url)
socket.setdefaulttimeout(DEFAULT_SOCKET_TIMEOUT)
return read_socket_w_progress(sock,progress,_('Retrieving %s'%url))
else:
sock = url
return read_socket_w_progress(sock,progress,_('Retrieving file'))
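# Editor's sketch (not part of the original module): a progress callback has
# the signature used above, (fraction_or_minus_one, message); this one just
# prints and is invented for illustration.
def _print_progress (fraction, message):
    if fraction < 0:
        # read_socket_w_progress passes -1 when the total size is unknown.
        print '%s...'%message
    else:
        print '%s: %d%%'%(message, int(fraction*100))
# Example: data = get_url('http://example.com/recipe.html', _print_progress)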
class MyBeautifulSoup (BeautifulSoup.ICantBelieveItsBeautifulSoup):
def __init__ (self, *args, **kwargs):
# Avoid invalid doctype decls of the type
# <!DOCTYPE foo ... />
# From the overly XML zealous folks at sfgate...
# http://sfgate.com/cgi-bin/article.cgi?f=/chronicle/archive/2006/08/16/FDG1LKHOMG1.DTL
self.PARSER_MASSAGE.append(
(re.compile('<!([^<>]*)/>',),
lambda x: '<!'+x.group(1)+'>'
)
)
kwargs['avoidParserProblems']=True
BeautifulSoup.ICantBelieveItsBeautifulSoup.__init__(self,*args,**kwargs)
def handle_comment (self, text): pass
def handle_decl (self, data): pass
def handle_pi (self, text): pass
class BeautifulSoupScraper:
"""We take a set of rules and create a scraper using BeautifulSoup.
This will be quite wonderfully magical. Handed rules, we can
customize a scraper for any set of data from any website.
Writing new rules should be simpler than writing a new class would
be. The rules will take the following form:
['foobar',DIRECTIONS_TO_TAG,METHOD_OF_STORAGE, POST_PROCESSING]
DIRECTIONS_TO_TAG is a list of instructions followed to find our
tag. We can search by tagname and attributes or by text. By
default, we drill down the structure each time.
METHOD_OF_STORAGE is either TEXT or MARKUP, depending what we want
to store in our return dictionary.
OPTIONAL POST_PROCESSING, which can be a function or a regexp. If
it is a regexp, it should have a grouping construct which will
contain the text we want to keep.
"""
TEXT = 'text'
MARKUP = 'markup'
def __init__ (self, rules):
"""Set up a scraper according to a list of rules."""
self.rules = rules
def feed_url (self, url,progress=None):
"""Feed ourselves a url.
URL can be a string or an already open socket.
"""
self.url = url
self.feed_data(get_url(url,progress))
def feed_data (self, data):
self.soup = MyBeautifulSoup(data)
def scrape_url (self, url, progress=None):
self.feed_url(url,progress)
return self.scrape()
def scrape_data (self, data):
self.feed_data(data)
return self.scrape()
def scrape (self):
"""Do our actual scraping according to our rules."""
self.dic = {}
for rule in self.rules:
self.apply_rule(rule)
return self.dic
def apply_rule (self, rule):
"""Apply a rule from our rule list."""
if len(rule)==3:
store_as,tagpath,retmethod = rule
post_processing=None
elif len(rule)==4:
store_as,tagpath,retmethod,post_processing=rule
else:
raise Exception("Rule %s is invalid (it should be 3 or 4 items long)." % rule)
tag = self.get_tag_according_to_path(tagpath)
self.store_tag(store_as,tag,retmethod,post_processing)
def post_process (self, post_processing, value, tag):
"""Post process value according to post_processing
post_processing is either callable (and will return a modified
string based on what it's handed), or a tuple: (regexp,
force_match).
The regexp must always yield the desired value in the first
grouping construct (if you require something more complicated,
write a lambda).
If force_match is True, return '' if there is no
match. Otherwise, default to the unadulterated value.
"""
if type(post_processing) == tuple and len(post_processing)==2:
regexp=re.compile(post_processing[0],re.UNICODE)
m=regexp.search(value)
if m: return m.groups()[0]
else:
if post_processing[1]: return ""
else: return value
elif callable(post_processing):
return post_processing(value,tag)
else:
return value
def get_tag_according_to_path (self, path):
"""Follow path to tag.
Path is a list of instructions.
"""
base = self.soup
for step in path:
base=self.follow_path(base,step)
if type(base)==list:
# then we'd better be the last step
break
return base
def follow_path (self, base, step):
"""Follow step from base of base.
Base is a tag. Step is a set of instructions as a dictionary.
{'regexp':regexp}
{'string':string}
OR
{'tag':tagname,
'attributes':{attr:name,attr:name,...},
'index': NUMBER or [FIRST,LAST],
}
"""
if not base: return # path ran out...
ind=step.get('index',0)
if step.has_key('regexp'):
ret = base.fetchText(re.compile(step['regexp']))
elif step.has_key('string'):
            ret = base.fetchText(step['string'])
else:
get_to = None
if ind:
if type(ind)==list: get_to=ind[-1]
elif type(ind)==int: get_to=ind
if not get_to or get_to < 0: get_to=None
else: get_to += 1
if get_to:
ret = base.fetch(step.get('tag'),
step.get('attributes',{}),
get_to)
else:
ret = base.fetch(step.get('tag'),step.get('attributes',{}))
if ret:
# if we have moveto, we do it with our index -- for
# example, if we have step['moveto']='parent', we grab the
# parents of each tag we would otherwise return. This can
# also work for previousSibling, nextSibling, etc.
if step.has_key('moveto'):
ret = [getattr(o,step['moveto']) for o in ret]
else:
for motion in ['firstNext','firstPrevious','findParent']:
if step.has_key(motion):
ret = [getattr(o,motion)(step[motion]) for o in ret]
break
if type(ind)==list or type(ind)==tuple:
return ret[ind[0]:ind[1]]
else: #ind is an integer
if ind < len(ret):
return ret[ind]
else:
print 'Problem following path.'
print 'I am supposed to get item: ',ind
print 'from: ',ret
print 'instructions were : ',
try: print 'base: ',base
except UnicodeDecodeError: print '(ugly unicodeness)'
try: print 'step: ',step
except UnicodeDecodeError: print '(ugly unicodeness)'
def store_tag (self, name, tag, method, post_processing=None):
"""Store our tag in our dictionary according to our method."""
if type(tag)==list:
for t in tag: self.store_tag(name,t,method,post_processing)
return
if method==self.TEXT:
if tag: val = get_text(tag)
else: val = ""
elif method==self.MARKUP:
if tag: val = tag.prettify()
else: val = ""
else: #otherwise, we assume our method is an attribute name
val = ""
if tag:
for aname,aval in tag.attrs:
if aname==method: val=aval
if post_processing:
val=self.post_process(post_processing, val, tag)
if not val: return # don't store empty values
if self.dic.has_key(name):
curval = self.dic[name]
if type(curval)==list: self.dic[name].append(val)
else: self.dic[name]=[self.dic[name],val]
else:
self.dic[name]=val
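# Editor's sketch (not part of the original module): a concrete rule list in
# the format described in the BeautifulSoupScraper docstring. The HTML, tag
# names and rule names are invented, and the snippet assumes the Python 2 /
# BeautifulSoup 3 environment this module targets.
def _example_scrape ():
    rules = [
        # [store_as, DIRECTIONS_TO_TAG, METHOD_OF_STORAGE, POST_PROCESSING]
        ['title',
         [{'tag':'h1'}], # drill down to the first <h1>
         BeautifulSoupScraper.TEXT],
        ['source',
         [{'tag':'span', 'attributes':{'class':'byline'}}],
         BeautifulSoupScraper.TEXT,
         (r'by\s+(.*)', True)], # regexp post-processing keeps group 1
        ]
    scraper = BeautifulSoupScraper(rules)
    # Returns something like {'title': u'Pancakes', 'source': u'A. Cook'}
    return scraper.scrape_data('<html><body><h1>Pancakes</h1>'
                               '<span class="byline">by A. Cook</span>'
                               '</body></html>')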
class GenericScraper (BeautifulSoupScraper):
"""A very simple scraper.
We grab a list of images and all the text.
"""
def __init__ (self):
BeautifulSoupScraper.__init__(self,
[['text',
[{'tag':'body',
# Believe it or not, I've found recipe webpages with
# more than one body tag
'index':[0,None],
}],
'text',
],
['images',
[{'tag':'img',
'index':[0,None]}],
'src',
],
['title',
[{'tag':'title'}],
'text',],
]
)
def scrape (self):
dic = BeautifulSoupScraper.scrape(self)
text = dic.get('title','')+'\n'+dic.get('text','')
images = dic.get('images',[])
if type(images)!=list: images = [images]
images = [urllib.basejoin(self.url,i) for i in images]
return text,images
class FancyTextGetter:
"""Starting with a BeautifulSoup tag, get text in some kind of reasonable w3mish way.
"""
IS_BREAK = ['br']
TWO_LB_BEFORE = ['table','p','blockquote']
LB_BEFORE = ['tr','li']
TAB_BEFORE = ['td']
IGNORE = ['script','meta','select']
def __call__ (self, top_tag, strip=True):
self.text = ''
if hasattr(top_tag,'contents'):
self.add_tag(top_tag)
else:
self.text = top_tag.string
if strip:
self.text = self.text.strip()
# No more than two spaces!
self.text = re.sub('\n\t','\n',self.text)
self.text = re.sub('\n\s*\n\s+','\n\n',self.text)
try:
return unicode(self.text,errors='ignore')
except:
print 'Odd encoding problems with ',self.text
return self.text
def add_tag (self, t):
for item in t.contents: self.get_text_fancy(item)
def get_text_fancy (self, item):
#print 'get_text_fancy looking at:',item
if self.text and hasattr(item,'name'):
if item.name in self.IGNORE: return
if item.name in self.IS_BREAK:
self.text += '\n'
return
elif item.name in self.TWO_LB_BEFORE:
self.text += '\n\n'
elif item.name in self.LB_BEFORE:
self.text += '\n'
elif item.name in self.TAB_BEFORE:
self.text += '\t'
if hasattr(item,'contents'):
self.add_tag(item)
else:
try:
s = item.string.encode('utf8','replace')
self.text += s
except UnicodeDecodeError:
print 'UNICODE DECODING ERROR IN TAG',
if hasattr(item,'name'):
print item.name
if hasattr(item,'fetchParents'):
print 'CHILD OF: ','<'.join([p.name for p in item.fetchParents()])
get_text = FancyTextGetter()
img_src_regexp = re.compile('<img[^>]+src=[\'\"]([^\'"]+)')
def get_image_from_tag (iurl, page_url):
if not iurl: return
iurl = urllib.basejoin(page_url,iurl)
tmpfi,info=urllib.urlretrieve(iurl)
ifi=file(tmpfi,'rb')
retval=ifi.read()
ifi.close()
return retval
def scrape_url (url, progress=None):
if type(url)==str: domain=url.split('/')[2]
if SUPPORTED_URLS.has_key(domain):
bss = BeautifulSoupScraper(SUPPORTED_URLS[domain])
else:
bss = None
for regexp,v in SUPPORTED_URLS_REGEXPS.items():
if re.match(regexp,domain):
bss=BeautifulSoupScraper(v)
break
if bss:
return bss.scrape_url(url,progress=progress)
def add_to_fn (fn):
'''Add 1 to a filename.'''
f,e=os.path.splitext(fn)
try:
f,n=os.path.splitext(f)
n = int(n[1:])
n += 1
return f + "%s%s"%(os.path.extsep,n) + e
except:
return f + "%s1"%os.path.extsep + e
def import_url (url, rd, progress=None, add_webpage_source=True, threaded=False,
interactive=True):
"""Import information from URL.
We handle HTML with scrape_url.
Everything else, we hand back to our caller as a list of
files. This is a little stupid -- it would be more elegant to just
hand back a class, but our importer stuff is a little munged up
with gui-ness and it's just going to have to be ugly for now
"""
if progress: progress(0.01,'Fetching webpage')
sock=urllib.urlopen(url)
header=sock.headers.get('content-type','text/html')
if progress: progress(0.02, 'Reading headers')
if header.find('html')>=0:
#return scrape_url(url,progress)
return WebPageImporter(rd,
url,
prog=progress,
add_webpage_source=add_webpage_source,
threaded=threaded,
interactive=interactive)
elif header=='application/zip':
import zip_importer
return zip_importer.zipfile_to_filelist(sock,progress,os.path.splitext(url.split('/')[-1])[0])
else:
fn = os.path.join(tempfile.tempdir,url.split('/')[-1])
while os.path.exists(fn):
fn=add_to_fn(fn)
ofi = open(fn,'w')
ofi.write(get_url(sock,progress))
ofi.close()
return [fn]
class WebPageImporter (importer.Importer):
"""Import a webpage as a recipe
We use our BeautifulSoupScraper class to do the actual scraping.
We use predefined webpages already registered in the global variable
SUPPORTED_URLS in this module.
If we don't know the web page, we will prompt the user to guide us
through a generic import.
To create a new type of web page import, create a new set of
import rules and register them with SUPPORTED_URLS.
"""
JOIN_AS_PARAGRAPHS = ['instructions','modifications','ingredient_block']
def __init__ (self, rd, url, add_webpage_source=True,
threaded=False, total=0, prog=None,conv=None,
interactive=True):
self.add_webpage_source=add_webpage_source
self.url = url
self.prog = prog
self.interactive = interactive
importer.Importer.__init__(self,rd,threaded=threaded,total=total,prog=prog,do_markup=True,
conv=conv)
def run (self):
"""Import our recipe to our database.
This must be called after self.d is already populated by scraping
our web page.
"""
debug('Scraping url %s'%self.url,0)
try:
self.d = scrape_url(self.url, progress=self.prog)
except:
print 'Trouble using default recipe filter to download %s'%self.url
traceback.print_exc()
print 'We will use a generic importer instead.'
self.d = {}
debug('Scraping url returned %s'%self.d,0)
do_generic = not self.d
if not do_generic:
try:
if self.prog: self.prog(-1,'Parsing webpage based on template.')
self.get_url_based_on_template()
except:
if not self.interactive: raise
do_generic = True
print """Automated HTML Import failed
***Falling back to generic import***
We were attempting to scrape using the following rules:
"""
print self.d
print """The following exception was raised:"""
traceback.print_exc()
print """If you think automated import should have worked for the webpage you
were importing, copy the output starting at "Automated HTML Import failed" into
a bug report and submit it at the GitHub site
https://github.com/thinkle/gourmet/issues
Sorry automated import didn't work. I hope you like
the new generic web importer!
"""
if do_generic:
if not self.interactive:
raise Exception("Unable to find importer for %s" % self.url)
# Interactive we go...
self.prog(-1,_("Don't recognize this webpage. Using generic importer..."))
gs = GenericScraper()
text,images = gs.scrape_url(self.url, progress=self.prog)
if not text and not images:
raise Exception("Unable to obtain text or images from url %s" % self.url)
import interactive_importer
ii = interactive_importer.InteractiveImporter(self.rd)
ii.set_text(text)
ii.add_attribute('link',self.url)
ii.set_images(images)
ii.run()
if self.prog: self.prog(1,_('Import complete.'))
return
def get_url_based_on_template (self):
"""Get URL based on template stored in d
"""
self.start_rec()
# Set link
self.rec['link']=self.url
# Add webpage as source
if self.add_webpage_source:
# add Domain as source
domain = self.url.split('/')[2]
src=self.d.get('source',None)
add_str = '(%s)'%domain
if type(src)==list: src.append(add_str)
elif src: src = [src,add_str]
else: src = domain # no parens if we're the only source
self.d['source']=src
for k,v in self.d.items():
debug('processing %s:%s'%(k,v),1)
if self.prog: self.prog(-1,_('Importing recipe'))
# parsed ingredients...
if k=='ingredient_parsed':
if type(v) != list: v=[v]
for ingdic in v:
if self.prog: self.prog(-1,_('Processing ingredients'))
# we take a special keyword, "text", which gets
# parsed
if ingdic.has_key('text'):
d = self.rd.parse_ingredient(ingdic['text'],conv=self.conv)
if d:
for dk,dv in d.items():
if not ingdic.has_key(dk) or not ingdic[dk]:
ingdic[dk]=dv
elif not ingdic.has_key('item'):
ingdic['item']=ingdic['text']
del ingdic['text']
self.start_ing(**ingdic)
self.commit_ing()
continue
# Listy stuff...
elif type(v)==list:
if k in self.JOIN_AS_PARAGRAPHS: v = "\n".join(v)
else: v = " ".join(v)
# Ingredients in blocks
if k == 'ingredient_block':
for l in v.split('\n'):
if self.prog: self.prog(-1,_('Processing ingredients.'))
dic=self.rd.parse_ingredient(l,conv=self.conv)
if dic:
self.start_ing(**dic)
self.commit_ing()
elif k == 'image':
try:
if v: img = get_image_from_tag(v,self.url)
except:
print 'Error retrieving image'
print 'tried to retrieve image from %s'%v
else:
if img:
self.rec['image'] = img
else: self.rec[k]=v
#print 'COMMITTING RECIPE',self.rec
self.commit_rec()
if self.prog: self.prog(1,_('Import complete.'))
# grofte/NAMI | ConvertScripts/steponeplus_v2.2-to-NAMI.py | bsd-3-clause
#
# steponeplus_v2.2-to-NAMI.py
#
# Pedro Sousa Lacerda <[email protected]>
# LaBiMM, UFBA
#
# This script converts XLS data from a StepOnePlus (v2.2.2) to CSV format
#
# You need pandas and xlrd installed in order to run this script.
# pip install pandas xlrd
#
# To run type
# python steponeplus_v2.2-to-NAMI.py my_raw_data.xls
# the output is my_raw_data_NAMI.csv
#
import sys
import csv
from os.path import splitext
import pandas as pd
# IO file paths
xls_path = sys.argv[1]
output_path = splitext(xls_path)[0] + '_NAMI.csv'
# Read sheet
sheet_name = "Multicomponent Data"
data = pd.read_excel(xls_path, sheet_name, header=7)
data.columns = [col.strip() for col in data.columns]
data = data.set_index('Reading')
data.columns = [int(w) for w in range(1, len(data.columns)+1)]
with open(output_path, 'wb') as output_file:
writer = csv.writer(output_file)
writer.writerow(("Temp", "Well", "Intensities"))
for well, intensities in data.iteritems():
for temp, intensity in intensities.iteritems():
writer.writerow((temp, well, intensity))
# cyx1231st/nova | nova/api/openstack/compute/fixed_ips.py | apache-2.0
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
import webob.exc
from nova.api.openstack.compute.schemas import fixed_ips
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
from nova import objects
ALIAS = 'os-fixed-ips'
authorize = extensions.os_compute_authorizer(ALIAS)
class FixedIPController(wsgi.Controller):
@wsgi.Controller.api_version('2.1', '2.3')
def _fill_reserved_status(self, req, fixed_ip, fixed_ip_info):
# NOTE(mriedem): To be backwards compatible, < 2.4 version does not
# show anything about reserved status.
pass
@wsgi.Controller.api_version('2.4') # noqa
def _fill_reserved_status(self, req, fixed_ip, fixed_ip_info):
fixed_ip_info['fixed_ip']['reserved'] = fixed_ip.reserved
@extensions.expected_errors((400, 404))
def show(self, req, id):
"""Return data about the given fixed IP."""
context = req.environ['nova.context']
authorize(context)
attrs = ['network', 'instance']
try:
fixed_ip = objects.FixedIP.get_by_address(context, id,
expected_attrs=attrs)
except exception.FixedIpNotFoundForAddress as ex:
raise webob.exc.HTTPNotFound(explanation=ex.format_message())
except exception.FixedIpInvalid as ex:
raise webob.exc.HTTPBadRequest(explanation=ex.format_message())
fixed_ip_info = {"fixed_ip": {}}
if fixed_ip is None:
msg = _("Fixed IP %s has been deleted") % id
raise webob.exc.HTTPNotFound(explanation=msg)
fixed_ip_info['fixed_ip']['cidr'] = str(fixed_ip.network.cidr)
fixed_ip_info['fixed_ip']['address'] = str(fixed_ip.address)
if fixed_ip.instance:
fixed_ip_info['fixed_ip']['hostname'] = fixed_ip.instance.hostname
fixed_ip_info['fixed_ip']['host'] = fixed_ip.instance.host
else:
fixed_ip_info['fixed_ip']['hostname'] = None
fixed_ip_info['fixed_ip']['host'] = None
self._fill_reserved_status(req, fixed_ip, fixed_ip_info)
return fixed_ip_info
@wsgi.response(202)
@extensions.expected_errors((400, 404))
@validation.schema(fixed_ips.reserve)
@wsgi.action('reserve')
def reserve(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
return self._set_reserved(context, id, True)
@wsgi.response(202)
@extensions.expected_errors((400, 404))
@validation.schema(fixed_ips.unreserve)
@wsgi.action('unreserve')
def unreserve(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
return self._set_reserved(context, id, False)
def _set_reserved(self, context, address, reserved):
try:
fixed_ip = objects.FixedIP.get_by_address(context, address)
fixed_ip.reserved = reserved
fixed_ip.save()
except exception.FixedIpNotFoundForAddress:
msg = _("Fixed IP %s not found") % address
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.FixedIpInvalid:
msg = _("Fixed IP %s not valid") % address
raise webob.exc.HTTPBadRequest(explanation=msg)
class FixedIps(extensions.V21APIExtensionBase):
"""Fixed IPs support."""
name = "FixedIPs"
alias = ALIAS
version = 1
def get_resources(self):
member_actions = {'action': 'POST'}
resources = extensions.ResourceExtension(ALIAS,
FixedIPController(),
member_actions=member_actions)
return [resources]
def get_controller_extensions(self):
return []
# alexus37/AugmentedRealityChess | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/GL/SGIX/sprite.py | mit
'''OpenGL extension SGIX.sprite
This module customises the behaviour of the
OpenGL.raw.GL.SGIX.sprite to provide a more
Python-friendly API
Overview (from the spec)
This extension provides support for viewpoint dependent alignment
of geometry, in particular geometry that rotates about a point or
a specified axis to face the eye point. The primary use is for
quickly rendering roughly cylindrically or spherically symmetric
objects, e.g. trees, smoke, clouds, etc. using geometry textured
with a partially transparent texture map.
Rendering sprite geometry requires applying a transformation to
primitives before the current model view. This matrix includes a
rotation which is computed based on the current model view matrix
and a translation which is specified explicitly
(SPRITE_TRANSLATION_SGIX). The current model view matrix itself
is not modified.
Primitives are first transformed by a rotation, depending on the
sprite mode:
SPRITE_AXIAL_SGIX: The front of the object is rotated about
an axis so that it faces the eye as much as the axis
constraint allows. This is used for roughly rendering cylindrical
objects such as trees in visual simulation.
SPRITE_OBJECT_ALIGNED_SGIX: The front of the object is
rotated about a point to face the eye with the remaining
rotational degree of freedom specified by aligning the top
of the object with a specified axis in object coordinates.
This is used for spherical objects and special effects such
as smoke which must maintain an alignment in object
coordinates for realism.
SPRITE_EYE_ALIGNED_SGIX: The front of the object is rotated
about a point to face the eye with the remaining rotational
degree of freedom specified by aligning the top of the object
with a specified axis in eye coordinates. This is used for
rendering sprites which must maintain an alignment on the
screen, such as 3D annotations.
The axis of rotation or alignment, SPRITE_AXIS_SGIX, can be
an arbitrary direction to support geocentric coordinate frames
in which "up" is not along X, Y or Z.
Sprite geometry is modeled in a canonical frame: +Z is the up
vector. -Y is the front vector which is rotated to point towards
the eye. In the discussion below, the eye vector is the vector to
the eye from the origin of the model view frame translated by the
sprite position.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SGIX/sprite.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.SGIX.sprite import *
from OpenGL.raw.GL.SGIX.sprite import _EXTENSION_NAME
def glInitSpriteSGIX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
# INPUT glSpriteParameterfvSGIX.params size not checked against 'pname'
glSpriteParameterfvSGIX=wrapper.wrapper(glSpriteParameterfvSGIX).setInputArraySize(
'params', None
)
# INPUT glSpriteParameterivSGIX.params size not checked against 'pname'
glSpriteParameterivSGIX=wrapper.wrapper(glSpriteParameterivSGIX).setInputArraySize(
'params', None
)
### END AUTOGENERATED SECTION
# firecat53/py-multistatus | plugins/worker.py | mit
import psutil
from threading import Thread
class Worker(Thread):
"""Worker thread skeleton class.
"""
def __init__(self, cfg, interval, **kwargs):
Thread.__init__(self)
self.cfg = cfg
self.interval = int(interval)
self.daemon = True
def _update_queue(self):
self.cfg.queue.put(self.data)
def _sel_text(self, text):
# Wrap string with selection color, and reset to normal fg color at the
# end
if not text:
return ""
return "{}{}{}{}".format(self.cfg.bar.sel_fg, self.cfg.bar.sel_bg,
text, self.cfg.bar.reset_sym)
def _err_text(self, text):
# Wrap string with error color, and reset to normal fg color at the end
if not text:
return ""
return "{}{}{}{}".format(self.cfg.bar.err_fg, self.cfg.bar.err_bg,
text, self.cfg.bar.reset_sym)
def _color_text(self, text, fg=None, bg=None):
if not text:
return ""
fg = fg or self.cfg.bar.norm_fg
bg = bg or self.cfg.bar.norm_bg
# Wrap text in arbitrary fg/bg colors. Defaults to norm fg, norm bg.
# Resets to norm fg, norm bg.
if fg != self.cfg.bar.norm_fg:
fg = self.cfg.bar.fg_sym.format(fg)
if bg != self.cfg.bar.norm_bg:
bg = self.cfg.bar.bg_sym.format(bg)
return "{}{}{}{}".format(fg, bg, text, self.cfg.bar.reset_sym)
def _out_format(self, text):
"""Add the separator to the output text.
"""
if not text:
return ""
else:
return "{}{}".format(self.cfg.bar.separator, text)
def run(self):
while True:
self.data = self._update_data()
self._update_queue()
psutil.time.sleep(self.interval)
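# Editor's sketch (not part of the original plugin set): a minimal concrete
# worker built on the skeleton above. The plugin name and output text are
# invented; cfg is whatever configuration object the host program passes in
# (it must expose .queue and .bar as the skeleton expects), and the sketch
# assumes a psutil version that provides boot_time().
class UptimeWorker(Worker):
    """Hypothetical plugin reporting whole seconds since boot."""
    def _update_data(self):
        uptime = int(psutil.time.time() - psutil.boot_time())
        return self._out_format("up {}s".format(uptime))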
# timsloan/prometeo-erp | core/auth/admin.py | lgpl-3.0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This file is part of the prometeo project.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
__author__ = 'Emanuele Bertoldi <[email protected]>'
__copyright__ = 'Copyright (c) 2011 Emanuele Bertoldi'
__version__ = '0.0.5'
from django.contrib import admin
from django.contrib.auth.models import Permission
from models import *
class PermissionAdmin(admin.ModelAdmin):
pass
class ObjectPermissionAdmin(admin.ModelAdmin):
pass
admin.site.register(Permission, PermissionAdmin)
admin.site.register(ObjectPermission, ObjectPermissionAdmin)
# MridulS/sympy | sympy/core/basic.py
"""Base class for all the objects in SymPy"""
from __future__ import print_function, division
from .assumptions import ManagedProperties
from .cache import cacheit
from .core import BasicType, C
from .sympify import _sympify, sympify, SympifyError
from .compatibility import (reduce, iterable, Iterator, ordered,
string_types, with_metaclass, zip_longest)
from .decorators import deprecated
from .singleton import S
from inspect import getmro
class Basic(with_metaclass(ManagedProperties)):
"""
Base class for all objects in SymPy.
Conventions:
1) Always use ``.args``, when accessing parameters of some instance:
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
2) Never use internal methods or variables (the ones prefixed with ``_``):
>>> cot(x)._args # do not use this, use cot(x).args instead
(x,)
"""
__slots__ = ['_mhash', # hash value
'_args', # arguments
'_assumptions'
]
# To be overridden with True in the appropriate subclasses
is_number = False
is_Atom = False
is_Symbol = False
is_Dummy = False
is_Wild = False
is_Function = False
is_Add = False
is_Mul = False
is_Pow = False
is_Number = False
is_Float = False
is_Rational = False
is_Integer = False
is_NumberSymbol = False
is_Order = False
is_Derivative = False
is_Piecewise = False
is_Poly = False
is_AlgebraicNumber = False
is_Relational = False
is_Equality = False
is_Boolean = False
is_Not = False
is_Matrix = False
is_Vector = False
def __new__(cls, *args):
obj = object.__new__(cls)
obj._assumptions = cls.default_assumptions
obj._mhash = None # will be set by __hash__ method.
obj._args = args # all items in args must be Basic objects
return obj
def copy(self):
return self.func(*self.args)
def __reduce_ex__(self, proto):
""" Pickling support."""
return type(self), self.__getnewargs__(), self.__getstate__()
def __getnewargs__(self):
return self.args
def __getstate__(self):
return {}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def __hash__(self):
# hash cannot be cached using cache_it because infinite recurrence
# occurs as hash is needed for setting cache dictionary keys
h = self._mhash
if h is None:
h = hash((type(self).__name__,) + self._hashable_content())
self._mhash = h
return h
def _hashable_content(self):
"""Return a tuple of information about self that can be used to
compute the hash. If a class defines additional attributes,
like ``name`` in Symbol, then this method should be updated
accordingly to return such relevant attributes.
Defining more than _hashable_content is necessary if __eq__ has
been defined by a class. See note about this in Basic.__eq__."""
return self._args
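    # Editor's note (illustrative, not part of the original source): a subclass
    # that adds its own attribute, as Symbol does with ``name``, would extend
    # the tuple along these lines (hypothetical sketch)::
    #
    #     def _hashable_content(self):
    #         return self._args + (self.name,)
    #
    # so that hashing and the __eq__ defined below both take the extra
    # attribute into account.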
@property
def assumptions0(self):
"""
Return object `type` assumptions.
For example:
Symbol('x', real=True)
Symbol('x', integer=True)
are different objects. In other words, besides Python type (Symbol in
this case), the initial assumptions are also forming their typeinfo.
Examples
========
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x.assumptions0
{'commutative': True}
>>> x = Symbol("x", positive=True)
>>> x.assumptions0
{'commutative': True, 'complex': True, 'hermitian': True,
'imaginary': False, 'negative': False, 'nonnegative': True,
'nonpositive': False, 'nonzero': True, 'positive': True, 'real': True,
'zero': False}
"""
return {}
def compare(self, other):
"""
Return -1, 0, 1 if the object is smaller, equal, or greater than other.
Not in the mathematical sense. If the object is of a different type
from the "other" then their classes are ordered according to
the sorted_classes list.
Examples
========
>>> from sympy.abc import x, y
>>> x.compare(y)
-1
>>> x.compare(x)
0
>>> y.compare(x)
1
"""
# all redefinitions of __cmp__ method should start with the
# following lines:
if self is other:
return 0
n1 = self.__class__
n2 = other.__class__
c = (n1 > n2) - (n1 < n2)
if c:
return c
#
st = self._hashable_content()
ot = other._hashable_content()
c = (len(st) > len(ot)) - (len(st) < len(ot))
if c:
return c
for l, r in zip(st, ot):
l = Basic(*l) if isinstance(l, frozenset) else l
r = Basic(*r) if isinstance(r, frozenset) else r
if isinstance(l, Basic):
c = l.compare(r)
else:
c = (l > r) - (l < r)
if c:
return c
return 0
@staticmethod
def _compare_pretty(a, b):
from sympy.series.order import Order
if isinstance(a, Order) and not isinstance(b, Order):
return 1
if not isinstance(a, Order) and isinstance(b, Order):
return -1
if a.is_Rational and b.is_Rational:
l = a.p * b.q
r = b.p * a.q
return (l > r) - (l < r)
else:
from sympy.core.symbol import Wild
p1, p2, p3 = Wild("p1"), Wild("p2"), Wild("p3")
r_a = a.match(p1 * p2**p3)
if r_a and p3 in r_a:
a3 = r_a[p3]
r_b = b.match(p1 * p2**p3)
if r_b and p3 in r_b:
b3 = r_b[p3]
c = Basic.compare(a3, b3)
if c != 0:
return c
return Basic.compare(a, b)
@classmethod
def fromiter(cls, args, **assumptions):
"""
Create a new object from an iterable.
This is a convenience function that allows one to create objects from
any iterable, without having to convert to a list or tuple first.
Examples
========
>>> from sympy import Tuple
>>> Tuple.fromiter(i for i in range(5))
(0, 1, 2, 3, 4)
"""
return cls(*tuple(args), **assumptions)
@classmethod
def class_key(cls):
"""Nice order of classes. """
return 5, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
"""
Return a sort key.
Examples
========
>>> from sympy.core import S, I
>>> sorted([S(1)/2, I, -I], key=lambda x: x.sort_key())
[1/2, -I, I]
>>> S("[x, 1/x, 1/x**2, x**2, x**(1/2), x**(1/4), x**(3/2)]")
[x, 1/x, x**(-2), x**2, sqrt(x), x**(1/4), x**(3/2)]
>>> sorted(_, key=lambda x: x.sort_key())
[x**(-2), 1/x, x**(1/4), sqrt(x), x, x**(3/2), x**2]
"""
# XXX: remove this when issue 5169 is fixed
def inner_key(arg):
if isinstance(arg, Basic):
return arg.sort_key(order)
else:
return arg
args = self._sorted_args
args = len(args), tuple([ inner_key(arg) for arg in args ])
return self.class_key(), args, S.One.sort_key(), S.One
def __eq__(self, other):
"""Return a boolean indicating whether a == b on the basis of
their symbolic trees.
This is the same as a.compare(b) == 0 but faster.
Notes
=====
If a class that overrides __eq__() needs to retain the
implementation of __hash__() from a parent class, the
interpreter must be told this explicitly by setting __hash__ =
<ParentClass>.__hash__. Otherwise the inheritance of __hash__()
will be blocked, just as if __hash__ had been explicitly set to
None.
References
==========
from http://docs.python.org/dev/reference/datamodel.html#object.__hash__
"""
if self is other:
return True
from .function import AppliedUndef, UndefinedFunction as UndefFunc
if isinstance(self, UndefFunc) and isinstance(other, UndefFunc):
if self.class_key() == other.class_key():
return True
else:
return False
if type(self) is not type(other):
# issue 6100 a**1.0 == a like a**2.0 == a**2
if isinstance(self, C.Pow) and self.exp == 1:
return self.base == other
if isinstance(other, C.Pow) and other.exp == 1:
return self == other.base
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other
if isinstance(self, AppliedUndef) and isinstance(other,
AppliedUndef):
if self.class_key() != other.class_key():
return False
elif type(self) is not type(other):
return False
return self._hashable_content() == other._hashable_content()
def __ne__(self, other):
"""a != b -> Compare two symbolic trees and see whether they are different
this is the same as:
a.compare(b) != 0
but faster
"""
return not self.__eq__(other)
def dummy_eq(self, other, symbol=None):
"""
Compare two expressions and handle dummy symbols.
Examples
========
>>> from sympy import Dummy
>>> from sympy.abc import x, y
>>> u = Dummy('u')
>>> (u**2 + 1).dummy_eq(x**2 + 1)
True
>>> (u**2 + 1) == (x**2 + 1)
False
>>> (u**2 + y).dummy_eq(x**2 + y, x)
True
>>> (u**2 + y).dummy_eq(x**2 + y, y)
False
"""
dummy_symbols = [ s for s in self.free_symbols if s.is_Dummy ]
if not dummy_symbols:
return self == other
elif len(dummy_symbols) == 1:
dummy = dummy_symbols.pop()
else:
raise ValueError(
"only one dummy symbol allowed on the left-hand side")
if symbol is None:
symbols = other.free_symbols
if not symbols:
return self == other
elif len(symbols) == 1:
symbol = symbols.pop()
else:
raise ValueError("specify a symbol in which expressions should be compared")
tmp = dummy.__class__()
return self.subs(dummy, tmp) == other.subs(symbol, tmp)
# Note, we always use the default ordering (lex) in __str__ and __repr__,
# regardless of the global setting. See issue 5487.
def __repr__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def __str__(self):
from sympy.printing import sstr
return sstr(self, order=None)
def atoms(self, *types):
"""Returns the atoms that form the current object.
By default, only objects that are truly atomic and can't
be divided into smaller pieces are returned: symbols, numbers,
and number symbols like I and pi. It is possible to request
atoms of any type, however, as demonstrated below.
Examples
========
>>> from sympy import I, pi, sin
>>> from sympy.abc import x, y
>>> (1 + x + 2*sin(y + I*pi)).atoms()
set([1, 2, I, pi, x, y])
If one or more types are given, the results will contain only
those types of atoms.
Examples
========
>>> from sympy import Number, NumberSymbol, Symbol
>>> (1 + x + 2*sin(y + I*pi)).atoms(Symbol)
set([x, y])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number)
set([1, 2])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol)
set([1, 2, pi])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Number, NumberSymbol, I)
set([1, 2, I, pi])
Note that I (imaginary unit) and zoo (complex infinity) are special
types of number symbols and are not part of the NumberSymbol class.
The type can be given implicitly, too:
>>> (1 + x + 2*sin(y + I*pi)).atoms(x) # x is a Symbol
set([x, y])
Be careful to check your assumptions when using the implicit option
since ``S(1).is_Integer = True`` but ``type(S(1))`` is ``One``, a special type
of sympy atom, while ``type(S(2))`` is type ``Integer`` and will find all
integers in an expression:
>>> from sympy import S
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(1))
set([1])
>>> (1 + x + 2*sin(y + I*pi)).atoms(S(2))
set([1, 2])
Finally, arguments to atoms() can select more than atomic atoms: any
sympy type (loaded in core/__init__.py) can be listed as an argument
and those types of "atoms" as found in scanning the arguments of the
expression recursively:
>>> from sympy import Function, Mul
>>> from sympy.core.function import AppliedUndef
>>> f = Function('f')
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(Function)
set([f(x), sin(y + I*pi)])
>>> (1 + f(x) + 2*sin(y + I*pi)).atoms(AppliedUndef)
set([f(x)])
>>> (1 + x + 2*sin(y + I*pi)).atoms(Mul)
set([I*pi, 2*sin(y + I*pi)])
"""
if types:
types = tuple(
[t if isinstance(t, type) else type(t) for t in types])
else:
types = (Atom,)
result = set()
for expr in preorder_traversal(self):
if isinstance(expr, types):
result.add(expr)
return result
@property
def free_symbols(self):
"""Return from the atoms of self those which are free symbols.
For most expressions, all symbols are free symbols. For some classes
this is not true. e.g. Integrals use Symbols for the dummy variables
which are bound variables, so Integral has a method to return all symbols
except those. Derivative keeps track of symbols with respect to which it
will perform a derivative; those are bound variables, too, so it has
its own symbols method.
Any other method that uses bound variables should implement a symbols
method."""
return reduce(set.union, [a.free_symbols for a in self.args], set())
@property
def canonical_variables(self):
"""Return a dictionary mapping any variable defined in
``self.variables`` as underscore-suffixed numbers
corresponding to their position in ``self.variables``. Enough
underscores are added to ensure that there will be no clash with
existing free symbols.
Examples
========
>>> from sympy import Lambda
>>> from sympy.abc import x
>>> Lambda(x, 2*x).canonical_variables
{x: 0_}
"""
if not hasattr(self, 'variables'):
return {}
u = "_"
while any(s.name.endswith(u) for s in self.free_symbols):
u += "_"
name = '%%i%s' % u
V = self.variables
return dict(list(zip(V, [C.Symbol(name % i, **v.assumptions0)
for i, v in enumerate(V)])))
def rcall(self, *args):
"""Apply on the argument recursively through the expression tree.
This method is used to simulate a common abuse of notation for
        operators. For instance, in SymPy the following will not work:
``(x+Lambda(y, 2*y))(z) == x+2*z``,
however you can use
>>> from sympy import Lambda
>>> from sympy.abc import x,y,z
>>> (x + Lambda(y, 2*y)).rcall(z)
x + 2*z
"""
return Basic._recursive_call(self, args)
@staticmethod
def _recursive_call(expr_to_call, on_args):
def the_call_method_is_overridden(expr):
for cls in getmro(type(expr)):
if '__call__' in cls.__dict__:
return cls != Basic
if callable(expr_to_call) and the_call_method_is_overridden(expr_to_call):
if isinstance(expr_to_call, C.Symbol): # XXX When you call a Symbol it is
return expr_to_call # transformed into an UndefFunction
else:
return expr_to_call(*on_args)
elif expr_to_call.args:
args = [Basic._recursive_call(
sub, on_args) for sub in expr_to_call.args]
return type(expr_to_call)(*args)
else:
return expr_to_call
def is_hypergeometric(self, k):
from sympy.simplify import hypersimp
return hypersimp(self, k) is not None
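    # Illustrative (added for exposition; not part of SymPy): factorial(k) is
    # hypergeometric in k since factorial(k+1)/factorial(k) simplifies to the
    # rational term k + 1, so factorial(k).is_hypergeometric(k) is True, while
    # log(k).is_hypergeometric(k) is False.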
@property
def is_comparable(self):
"""Return True if self can be computed to a real number
with precision, else False.
Examples
========
>>> from sympy import exp_polar, pi, I
>>> (I*exp_polar(I*pi/2)).is_comparable
True
>>> (I*exp_polar(I*pi*2)).is_comparable
False
"""
is_real = self.is_real
if is_real is False:
return False
is_number = self.is_number
if is_number is False:
return False
if is_real and is_number:
return True
n, i = [p.evalf(2) for p in self.as_real_imag()]
if not i.is_Number or not n.is_Number:
return False
if i:
# if _prec = 1 we can't decide and if not,
# the answer is False so return False
return False
else:
return n._prec != 1
@property
def func(self):
"""
The top-level function in an expression.
The following should hold for all objects::
>> x == x.func(*x.args)
Examples
========
>>> from sympy.abc import x
>>> a = 2*x
>>> a.func
<class 'sympy.core.mul.Mul'>
>>> a.args
(2, x)
>>> a.func(*a.args)
2*x
>>> a == a.func(*a.args)
True
"""
return self.__class__
@property
def args(self):
"""Returns a tuple of arguments of 'self'.
Examples
========
>>> from sympy import cot
>>> from sympy.abc import x, y
>>> cot(x).args
(x,)
>>> cot(x).args[0]
x
>>> (x*y).args
(x, y)
>>> (x*y).args[1]
y
Notes
=====
Never use self._args, always use self.args.
Only use _args in __new__ when creating a new function.
Don't override .args() from Basic (so that it's easy to
change the interface in the future if needed).
"""
return self._args
@property
def _sorted_args(self):
"""
The same as ``args``. Derived classes which don't fix an
order on their arguments should override this method to
produce the sorted representation.
"""
return self.args
@deprecated(useinstead="iter(self.args)", issue=7717, deprecated_since_version="0.7.6")
def iter_basic_args(self):
"""
Iterates arguments of ``self``.
"""
return iter(self.args)
def as_poly(self, *gens, **args):
"""Converts ``self`` to a polynomial or returns ``None``.
>>> from sympy import sin
>>> from sympy.abc import x, y
>>> print((x**2 + x*y).as_poly())
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + x*y).as_poly(x, y))
Poly(x**2 + x*y, x, y, domain='ZZ')
>>> print((x**2 + sin(y)).as_poly(x, y))
None
"""
from sympy.polys import Poly, PolynomialError
try:
poly = Poly(self, *gens, **args)
if not poly.is_Poly:
return None
else:
return poly
except PolynomialError:
return None
def as_content_primitive(self, radical=False):
"""A stub to allow Basic args (like Tuple) to be skipped when computing
the content and primitive components of an expression.
See docstring of Expr.as_content_primitive
"""
return S.One, self
def subs(self, *args, **kwargs):
"""
Substitutes old for new in an expression after sympifying args.
`args` is either:
- two arguments, e.g. foo.subs(old, new)
- one iterable argument, e.g. foo.subs(iterable). The iterable may be
o an iterable container with (old, new) pairs. In this case the
replacements are processed in the order given with successive
patterns possibly affecting replacements already made.
o a dict or set whose key/value items correspond to old/new pairs.
In this case the old/new pairs will be sorted by op count and in
case of a tie, by number of args and the default_sort_key. The
resulting sorted list is then processed as an iterable container
(see previous).
If the keyword ``simultaneous`` is True, the subexpressions will not be
evaluated until all the substitutions have been made.
Examples
========
>>> from sympy import pi, exp, limit, oo
>>> from sympy.abc import x, y
>>> (1 + x*y).subs(x, pi)
pi*y + 1
>>> (1 + x*y).subs({x:pi, y:2})
1 + 2*pi
>>> (1 + x*y).subs([(x, pi), (y, 2)])
1 + 2*pi
>>> reps = [(y, x**2), (x, 2)]
>>> (x + y).subs(reps)
6
>>> (x + y).subs(reversed(reps))
x**2 + 2
>>> (x**2 + x**4).subs(x**2, y)
y**2 + y
To replace only the x**2 but not the x**4, use xreplace:
>>> (x**2 + x**4).xreplace({x**2: y})
x**4 + y
To delay evaluation until all substitutions have been made,
set the keyword ``simultaneous`` to True:
>>> (x/y).subs([(x, 0), (y, 0)])
0
>>> (x/y).subs([(x, 0), (y, 0)], simultaneous=True)
nan
This has the added feature of not allowing subsequent substitutions
to affect those already made:
>>> ((x + y)/y).subs({x + y: y, y: x + y})
1
>>> ((x + y)/y).subs({x + y: y, y: x + y}, simultaneous=True)
y/(x + y)
In order to obtain a canonical result, unordered iterables are
sorted by count_op length, number of arguments and by the
default_sort_key to break any ties. All other iterables are left
unsorted.
>>> from sympy import sqrt, sin, cos
>>> from sympy.abc import a, b, c, d, e
>>> A = (sqrt(sin(2*x)), a)
>>> B = (sin(2*x), b)
>>> C = (cos(2*x), c)
>>> D = (x, d)
>>> E = (exp(x), e)
>>> expr = sqrt(sin(2*x))*sin(exp(x)*x)*cos(2*x) + sin(2*x)
>>> expr.subs(dict([A,B,C,D,E]))
a*c*sin(d*e) + b
The resulting expression represents a literal replacement of the
old arguments with the new arguments. This may not reflect the
limiting behavior of the expression:
>>> (x**3 - 3*x).subs({x: oo})
nan
>>> limit(x**3 - 3*x, x, oo)
oo
If the substitution will be followed by numerical
evaluation, it is better to pass the substitution to
evalf as
>>> (1/x).evalf(subs={x: 3.0}, n=21)
0.333333333333333333333
rather than
>>> (1/x).subs({x: 3.0}).evalf(21)
0.333333333333333314830
as the former will ensure that the desired level of precision is
obtained.
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
xreplace: exact node replacement in expr tree; also capable of
using matching rules
evalf: calculates the given formula to a desired level of precision
"""
from sympy.core.containers import Dict
from sympy.utilities import default_sort_key
unordered = False
if len(args) == 1:
sequence = args[0]
if isinstance(sequence, set):
unordered = True
elif isinstance(sequence, (Dict, dict)):
unordered = True
sequence = sequence.items()
elif not iterable(sequence):
from sympy.utilities.misc import filldedent
raise ValueError(filldedent("""
When a single argument is passed to subs
it should be a dictionary of old: new pairs or an iterable
of (old, new) tuples."""))
elif len(args) == 2:
sequence = [args]
else:
raise ValueError("subs accepts either 1 or 2 arguments")
sequence = list(sequence)
for i in range(len(sequence)):
o, n = sequence[i]
so, sn = sympify(o), sympify(n)
if not isinstance(so, Basic):
if type(o) is str:
so = C.Symbol(o)
sequence[i] = (so, sn)
if _aresame(so, sn):
sequence[i] = None
continue
sequence = list(filter(None, sequence))
if unordered:
sequence = dict(sequence)
if not all(k.is_Atom for k in sequence):
d = {}
for o, n in sequence.items():
try:
ops = o.count_ops(), len(o.args)
except TypeError:
ops = (0, 0)
d.setdefault(ops, []).append((o, n))
newseq = []
for k in sorted(d.keys(), reverse=True):
newseq.extend(
sorted([v[0] for v in d[k]], key=default_sort_key))
sequence = [(k, sequence[k]) for k in newseq]
del newseq, d
else:
sequence = sorted([(k, v) for (k, v) in sequence.items()],
key=default_sort_key)
if kwargs.pop('simultaneous', False): # XXX should this be the default for dict subs?
reps = {}
rv = self
kwargs['hack2'] = True
m = C.Dummy()
for old, new in sequence:
d = C.Dummy(commutative=new.is_commutative)
# using d*m so Subs will be used on dummy variables
# in things like Derivative(f(x, y), x) in which x
# is both free and bound
rv = rv._subs(old, d*m, **kwargs)
if not isinstance(rv, Basic):
break
reps[d] = new
reps[m] = S.One # get rid of m
return rv.xreplace(reps)
else:
rv = self
for old, new in sequence:
rv = rv._subs(old, new, **kwargs)
if not isinstance(rv, Basic):
break
return rv
@cacheit
def _subs(self, old, new, **hints):
"""Substitutes an expression old -> new.
If self is not equal to old then _eval_subs is called.
If _eval_subs doesn't want to make any special replacement
then a None is received which indicates that the fallback
should be applied wherein a search for replacements is made
amongst the arguments of self.
>>> from sympy import Add
>>> from sympy.abc import x, y, z
Examples
========
Add's _eval_subs knows how to target x + y in the following
so it makes the change:
>>> (x + y + z).subs(x + y, 1)
z + 1
Add's _eval_subs doesn't need to know how to find x + y in
the following:
>>> Add._eval_subs(z*(x + y) + 3, x + y, 1) is None
True
The returned None will cause the fallback routine to traverse the args and
pass the z*(x + y) arg to Mul where the change will take place and the
substitution will succeed:
>>> (z*(x + y) + 3).subs(x + y, 1)
z + 3
** Developers Notes **
An _eval_subs routine for a class should be written if:
1) any arguments are not instances of Basic (e.g. bool, tuple);
2) some arguments should not be targeted (as in integration
variables);
3) if there is something other than a literal replacement
that should be attempted (as in Piecewise where the condition
may be updated without doing a replacement).
If it is overridden, here are some special cases that might arise:
1) If it turns out that no special change was made and all
the original sub-arguments should be checked for
replacements then None should be returned.
2) If it is necessary to do substitutions on a portion of
the expression then _subs should be called. _subs will
handle the case of any sub-expression being equal to old
(which usually would not be the case) while its fallback
will handle the recursion into the sub-arguments. For
example, after Add's _eval_subs removes some matching terms
it must process the remaining terms so it calls _subs
on each of the un-matched terms and then adds them
onto the terms previously obtained.
3) If the initial expression should remain unchanged then
the original expression should be returned. (Whenever an
expression is returned, modified or not, no further
substitution of old -> new is attempted.) Sum's _eval_subs
routine uses this strategy when a substitution is attempted
on any of its summation variables.
"""
def fallback(self, old, new):
"""
Try to replace old with new in any of self's arguments.
"""
hit = False
args = list(self.args)
for i, arg in enumerate(args):
if not hasattr(arg, '_eval_subs'):
continue
arg = arg._subs(old, new, **hints)
if not _aresame(arg, args[i]):
hit = True
args[i] = arg
if hit:
rv = self.func(*args)
hack2 = hints.get('hack2', False)
if hack2 and self.is_Mul and not rv.is_Mul: # 2-arg hack
coeff = S.One
nonnumber = []
for i in args:
if i.is_Number:
coeff *= i
else:
nonnumber.append(i)
nonnumber = self.func(*nonnumber)
if coeff is S.One:
return nonnumber
else:
return self.func(coeff, nonnumber, evaluate=False)
return rv
return self
if _aresame(self, old):
return new
rv = self._eval_subs(old, new)
if rv is None:
rv = fallback(self, old, new)
return rv
def _eval_subs(self, old, new):
"""Override this stub if you want to do anything more than
attempt a replacement of old with new in the arguments of self.
See also: _subs
"""
return None
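    # Illustrative override sketch (added for exposition; not part of SymPy): a class
    # whose second argument is a bound variable could protect it from substitution:
    #     def _eval_subs(self, old, new):
    #         if old == self.args[1]:
    #             return self    # returning self stops any further old -> new subs
    #         return None        # None means: fall back to the generic traversal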
def xreplace(self, rule):
"""
Replace occurrences of objects within the expression.
Parameters
==========
rule : dict-like
Expresses a replacement rule
Returns
=======
xreplace : the result of the replacement
Examples
========
>>> from sympy import symbols, pi, exp
>>> x, y, z = symbols('x y z')
>>> (1 + x*y).xreplace({x: pi})
pi*y + 1
>>> (1 + x*y).xreplace({x:pi, y:2})
1 + 2*pi
Replacements occur only if an entire node in the expression tree is
matched:
>>> (x*y + z).xreplace({x*y: pi})
z + pi
>>> (x*y*z).xreplace({x*y: pi})
x*y*z
>>> (2*x).xreplace({2*x: y, x: z})
y
>>> (2*2*x).xreplace({2*x: y, x: z})
4*z
>>> (x + y + 2).xreplace({x + y: 2})
x + y + 2
>>> (x + 2 + exp(x + 2)).xreplace({x + 2: y})
x + exp(y) + 2
xreplace doesn't differentiate between free and bound symbols. In the
following, subs(x, y) would not change x since it is a bound symbol,
but xreplace does:
>>> from sympy import Integral
>>> Integral(x, (x, 1, 2*x)).xreplace({x: y})
Integral(y, (y, 1, 2*y))
Trying to replace x with an expression raises an error:
>>> Integral(x, (x, 1, 2*x)).xreplace({x: 2*y}) #doctest: +SKIP
ValueError: Invalid limits given: ((2*y, 1, 4*y),)
See Also
========
replace: replacement capable of doing wildcard-like matching,
parsing of match, and conditional replacements
subs: substitution of subexpressions as defined by the objects
themselves.
"""
if self in rule:
return rule[self]
elif rule:
args = []
for a in self.args:
try:
args.append(a.xreplace(rule))
except AttributeError:
args.append(a)
args = tuple(args)
if not _aresame(args, self.args):
return self.func(*args)
return self
@cacheit
def has(self, *patterns):
"""
Test whether any subexpression matches any of the patterns.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x, y, z
>>> (x**2 + sin(x*y)).has(z)
False
>>> (x**2 + sin(x*y)).has(x, y, z)
True
>>> x.has(x)
True
Note that ``expr.has(*patterns)`` is exactly equivalent to
``any(expr.has(p) for p in patterns)``. In particular, ``False`` is
returned when the list of patterns is empty.
>>> x.has()
False
"""
return any(self._has(pattern) for pattern in patterns)
def _has(self, pattern):
"""Helper for .has()"""
from sympy.core.function import UndefinedFunction, Function
if isinstance(pattern, UndefinedFunction):
return any(f.func == pattern or f == pattern
for f in self.atoms(Function, UndefinedFunction))
pattern = sympify(pattern)
if isinstance(pattern, BasicType):
return any(isinstance(arg, pattern)
for arg in preorder_traversal(self))
try:
match = pattern._has_matcher()
return any(match(arg) for arg in preorder_traversal(self))
except AttributeError:
return any(arg == pattern for arg in preorder_traversal(self))
def _has_matcher(self):
"""Helper for .has()"""
return self.__eq__
def replace(self, query, value, map=False, simultaneous=True, exact=False):
"""
Replace matching subexpressions of ``self`` with ``value``.
If ``map = True`` then also return the mapping {old: new} where ``old``
was a sub-expression found with query and ``new`` is the replacement
value for it. If the expression itself doesn't match the query, then
the returned value will be ``self.xreplace(map)`` otherwise it should
be ``self.subs(ordered(map.items()))``.
Traverses an expression tree and performs replacement of matching
subexpressions from the bottom to the top of the tree. The default
approach is to do the replacement in a simultaneous fashion so
changes made are targeted only once. If this is not desired or causes
problems, ``simultaneous`` can be set to False. In addition, if an
expression containing more than one Wild symbol is being used to match
subexpressions and the ``exact`` flag is True, then the match will only
succeed if non-zero values are received for each Wild that appears in
the match pattern.
The list of possible combinations of queries and replacement values
is listed below:
Examples
========
Initial setup
>>> from sympy import log, sin, cos, tan, Wild, Mul, Add
>>> from sympy.abc import x, y
>>> f = log(sin(x)) + tan(sin(x**2))
1.1. type -> type
obj.replace(type, newtype)
When object of type ``type`` is found, replace it with the
result of passing its argument(s) to ``newtype``.
>>> f.replace(sin, cos)
log(cos(x)) + tan(cos(x**2))
>>> sin(x).replace(sin, cos, map=True)
(cos(x), {sin(x): cos(x)})
>>> (x*y).replace(Mul, Add)
x + y
1.2. type -> func
obj.replace(type, func)
When object of type ``type`` is found, apply ``func`` to its
argument(s). ``func`` must be written to handle the number
of arguments of ``type``.
>>> f.replace(sin, lambda arg: sin(2*arg))
log(sin(2*x)) + tan(sin(2*x**2))
>>> (x*y).replace(Mul, lambda *args: sin(2*Mul(*args)))
sin(2*x*y)
2.1. pattern -> expr
obj.replace(pattern(wild), expr(wild))
Replace subexpressions matching ``pattern`` with the expression
written in terms of the Wild symbols in ``pattern``.
>>> a = Wild('a')
>>> f.replace(sin(a), tan(a))
log(tan(x)) + tan(tan(x**2))
>>> f.replace(sin(a), tan(a/2))
log(tan(x/2)) + tan(tan(x**2/2))
>>> f.replace(sin(a), a)
log(x) + tan(x**2)
>>> (x*y).replace(a*x, a)
y
When the default value of False is used with patterns that have
more than one Wild symbol, non-intuitive results may be obtained:
>>> b = Wild('b')
>>> (2*x).replace(a*x + b, b - a)
2/x
For this reason, the ``exact`` option can be used to make the
replacement only when the match gives non-zero values for all
Wild symbols:
>>> (2*x + y).replace(a*x + b, b - a, exact=True)
y - 2
>>> (2*x).replace(a*x + b, b - a, exact=True)
2*x
2.2. pattern -> func
obj.replace(pattern(wild), lambda wild: expr(wild))
All behavior is the same as in 2.1 but now a function in terms of
pattern variables is used rather than an expression:
>>> f.replace(sin(a), lambda a: sin(2*a))
log(sin(2*x)) + tan(sin(2*x**2))
3.1. func -> func
obj.replace(filter, func)
Replace subexpression ``e`` with ``func(e)`` if ``filter(e)``
is True.
>>> g = 2*sin(x**3)
>>> g.replace(lambda expr: expr.is_Number, lambda expr: expr**2)
4*sin(x**9)
The expression itself is also targeted by the query but is done in
such a fashion that changes are not made twice.
>>> e = x*(x*y + 1)
>>> e.replace(lambda x: x.is_Mul, lambda x: 2*x)
2*x*(2*x*y + 1)
See Also
========
subs: substitution of subexpressions as defined by the objects
themselves.
xreplace: exact node replacement in expr tree; also capable of
using matching rules
"""
from sympy.core.symbol import Dummy
from sympy.simplify.simplify import bottom_up
try:
query = sympify(query)
except SympifyError:
pass
try:
value = sympify(value)
except SympifyError:
pass
if isinstance(query, type):
_query = lambda expr: isinstance(expr, query)
if isinstance(value, type):
_value = lambda expr, result: value(*expr.args)
elif callable(value):
_value = lambda expr, result: value(*expr.args)
else:
raise TypeError(
"given a type, replace() expects another "
"type or a callable")
elif isinstance(query, Basic):
_query = lambda expr: expr.match(query)
# XXX remove the exact flag and make multi-symbol
# patterns use exact=True semantics; to do this the query must
# be tested to find out how many Wild symbols are present.
# See https://groups.google.com/forum/
# ?fromgroups=#!topic/sympy/zPzo5FtRiqI
# for a method of inspecting a function to know how many
# parameters it has.
if isinstance(value, Basic):
if exact:
_value = lambda expr, result: (value.subs(result)
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value.subs(result)
elif callable(value):
# match dictionary keys get the trailing underscore stripped
# from them and are then passed as keywords to the callable;
# if ``exact`` is True, only accept match if there are no null
# values amongst those matched.
if exact:
_value = lambda expr, result: (value(**dict([ (
str(key)[:-1], val) for key, val in result.items()]))
if all(val for val in result.values()) else expr)
else:
_value = lambda expr, result: value(**dict([ (
str(key)[:-1], val) for key, val in result.items()]))
else:
raise TypeError(
"given an expression, replace() expects "
"another expression or a callable")
elif callable(query):
_query = query
if callable(value):
_value = lambda expr, result: value(expr)
else:
raise TypeError(
"given a callable, replace() expects "
"another callable")
else:
raise TypeError(
"first argument to replace() must be a "
"type, an expression or a callable")
mapping = {} # changes that took place
mask = [] # the dummies that were used as change placeholders
def rec_replace(expr):
result = _query(expr)
if result or result == {}:
new = _value(expr, result)
if new is not None and new != expr:
mapping[expr] = new
if simultaneous:
# don't let this expression be changed during rebuilding
com = getattr(new, 'is_commutative', True)
if com is None:
com = True
d = Dummy(commutative=com)
mask.append((d, new))
expr = d
else:
expr = new
return expr
rv = bottom_up(self, rec_replace, atoms=True)
# restore original expressions for Dummy symbols
if simultaneous:
mask = list(reversed(mask))
for o, n in mask:
r = {o: n}
rv = rv.xreplace(r)
if not map:
return rv
else:
if simultaneous:
# restore subexpressions in mapping
for o, n in mask:
r = {o: n}
mapping = dict([(k.xreplace(r), v.xreplace(r))
for k, v in mapping.items()])
return rv, mapping
def find(self, query, group=False):
"""Find all subexpressions matching a query. """
query = _make_find_query(query)
results = list(filter(query, preorder_traversal(self)))
if not group:
return set(results)
else:
groups = {}
for result in results:
if result in groups:
groups[result] += 1
else:
groups[result] = 1
return groups
def count(self, query):
"""Count the number of matching subexpressions. """
query = _make_find_query(query)
return sum(bool(query(sub)) for sub in preorder_traversal(self))
def matches(self, expr, repl_dict={}, old=False):
"""
Helper method for match() that looks for a match between Wild symbols
in self and expressions in expr.
Examples
========
>>> from sympy import symbols, Wild, Basic
>>> a, b, c = symbols('a b c')
>>> x = Wild('x')
>>> Basic(a + x, x).matches(Basic(a + b, c)) is None
True
>>> Basic(a + x, x).matches(Basic(a + b + c, b + c))
{x_: b + c}
"""
expr = sympify(expr)
if not isinstance(expr, self.__class__):
return None
if self == expr:
return repl_dict
if len(self.args) != len(expr.args):
return None
d = repl_dict.copy()
for arg, other_arg in zip(self.args, expr.args):
if arg == other_arg:
continue
d = arg.xreplace(d).matches(other_arg, d, old=old)
if d is None:
return None
return d
def match(self, pattern, old=False):
"""
Pattern matching.
Wild symbols match all.
Return ``None`` when expression (self) does not match
with pattern. Otherwise return a dictionary such that::
pattern.xreplace(self.match(pattern)) == self
Examples
========
>>> from sympy import Wild
>>> from sympy.abc import x, y
>>> p = Wild("p")
>>> q = Wild("q")
>>> r = Wild("r")
>>> e = (x+y)**(x+y)
>>> e.match(p**p)
{p_: x + y}
>>> e.match(p**q)
{p_: x + y, q_: x + y}
>>> e = (2*x)**2
>>> e.match(p*q**r)
{p_: 4, q_: x, r_: 2}
>>> (p*q**r).xreplace(e.match(p*q**r))
4*x**2
The ``old`` flag will give the old-style pattern matching where
expressions and patterns are essentially solved to give the
match. Both of the following give None unless ``old=True``:
>>> (x - 2).match(p - x, old=True)
{p_: 2*x - 2}
>>> (2/x).match(p*x, old=True)
{p_: 2/x**2}
"""
from sympy import signsimp
pattern = sympify(pattern)
s = signsimp(self)
p = signsimp(pattern)
# if we still have the same relationship between the types of
# input, then use the sign simplified forms
if (pattern.func == self.func) and (s.func == p.func):
rv = p.matches(s, old=old)
else:
rv = pattern.matches(self, old=old)
return rv
def count_ops(self, visual=None):
"""wrapper for count_ops that returns the operation count."""
from sympy import count_ops
return count_ops(self, visual)
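    # Illustrative (added for exposition; not part of SymPy): count_ops tallies the
    # operations in the expression tree, e.g. (x + 2*y).count_ops() == 2
    # (one Add and one Mul).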
def doit(self, **hints):
"""Evaluate objects that are not evaluated by default like limits,
integrals, sums and products. All objects of this kind will be
evaluated recursively, unless some species were excluded via 'hints'
or unless the 'deep' hint was set to 'False'.
>>> from sympy import Integral
>>> from sympy.abc import x
>>> 2*Integral(x, x)
2*Integral(x, x)
>>> (2*Integral(x, x)).doit()
x**2
>>> (2*Integral(x, x)).doit(deep = False)
2*Integral(x, x)
"""
if hints.get('deep', True):
terms = [ term.doit(**hints) if isinstance(term, Basic) else term
for term in self.args ]
return self.func(*terms)
else:
return self
def _eval_rewrite(self, pattern, rule, **hints):
if self.is_Atom:
if hasattr(self, rule):
return getattr(self, rule)()
return self
if hints.get('deep', True):
args = [ a._eval_rewrite(pattern, rule, **hints)
if isinstance(a, Basic) else a
for a in self.args ]
else:
args = self.args
if pattern is None or isinstance(self.func, pattern):
if hasattr(self, rule):
rewritten = getattr(self, rule)(*args)
if rewritten is not None:
return rewritten
return self.func(*args)
def rewrite(self, *args, **hints):
""" Rewrite functions in terms of other functions.
        Rewrites an expression containing applications of functions
        of one kind in terms of functions of a different kind. For
        example, you can rewrite trigonometric functions as complex
        exponentials or combinatorial functions in terms of the gamma function.
        As a pattern this function accepts a list of functions
        to rewrite (instances of DefinedFunction class). As a rule
        you can use a string or a destination function instance (in
        the latter case rewrite() derives the rule from the function's name).
There is also the possibility to pass hints on how to rewrite
the given expressions. For now there is only one such hint
defined called 'deep'. When 'deep' is set to False it will
forbid functions to rewrite their contents.
Examples
========
>>> from sympy import sin, exp
>>> from sympy.abc import x
Unspecified pattern:
>>> sin(x).rewrite(exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a single function:
>>> sin(x).rewrite(sin, exp)
-I*(exp(I*x) - exp(-I*x))/2
Pattern as a list of functions:
>>> sin(x).rewrite([sin, ], exp)
-I*(exp(I*x) - exp(-I*x))/2
"""
if not args:
return self
else:
pattern = args[:-1]
if isinstance(args[-1], string_types):
rule = '_eval_rewrite_as_' + args[-1]
else:
rule = '_eval_rewrite_as_' + args[-1].__name__
if not pattern:
return self._eval_rewrite(None, rule, **hints)
else:
if iterable(pattern[0]):
pattern = pattern[0]
pattern = [ p.__class__ for p in pattern if self.has(p) ]
if pattern:
return self._eval_rewrite(tuple(pattern), rule, **hints)
else:
return self
@property
@deprecated(useinstead="is_finite", issue=8071, deprecated_since_version="0.7.6")
def is_bounded(self):
return super(Basic, self).__getattribute__('is_finite')
@property
@deprecated(useinstead="is_infinite", issue=8071, deprecated_since_version="0.7.6")
def is_unbounded(self):
return super(Basic, self).__getattribute__('is_infinite')
@deprecated(useinstead="is_zero", issue=8071, deprecated_since_version="0.7.6")
def is_infinitesimal(self):
return super(Basic, self).__getattribute__('is_zero')
class Atom(Basic):
"""
A parent class for atomic things. An atom is an expression with no subexpressions.
Examples
========
Symbol, Number, Rational, Integer, ...
But not: Add, Mul, Pow, ...
"""
is_Atom = True
__slots__ = []
def matches(self, expr, repl_dict={}, old=False):
if self == expr:
return repl_dict
def xreplace(self, rule, hack2=False):
return rule.get(self, self)
def doit(self, **hints):
return self
@classmethod
def class_key(cls):
return 2, 0, cls.__name__
@cacheit
def sort_key(self, order=None):
from sympy.core import S
return self.class_key(), (1, (str(self),)), S.One.sort_key(), S.One
def _eval_simplify(self, ratio, measure):
return self
@property
def _sorted_args(self):
# this is here as a safeguard against accidentally using _sorted_args
# on Atoms -- they cannot be rebuilt as atom.func(*atom._sorted_args)
# since there are no args. So the calling routine should be checking
# to see that this property is not called for Atoms.
raise AttributeError('Atoms have no args. It might be necessary'
' to make a check for Atoms in the calling code.')
def _aresame(a, b):
"""Return True if a and b are structurally the same, else False.
Examples
========
To SymPy, 2.0 == 2:
>>> from sympy import S
>>> 2.0 == S(2)
True
Since a simple 'same or not' result is sometimes useful, this routine was
written to provide that query:
>>> from sympy.core.basic import _aresame
>>> _aresame(S(2.0), S(2))
False
"""
from .function import AppliedUndef, UndefinedFunction as UndefFunc
for i, j in zip_longest(preorder_traversal(a), preorder_traversal(b)):
if i != j or type(i) != type(j):
if ((isinstance(i, UndefFunc) and isinstance(j, UndefFunc)) or
(isinstance(i, AppliedUndef) and isinstance(j, AppliedUndef))):
if i.class_key() != j.class_key():
return False
else:
return False
else:
return True
def _atomic(e):
"""Return atom-like quantities as far as substitution is
concerned: Derivatives, Functions and Symbols. Don't
return any 'atoms' that are inside such quantities unless
they also appear outside, too.
Examples
========
>>> from sympy import Derivative, Function, cos
>>> from sympy.abc import x, y
>>> from sympy.core.basic import _atomic
>>> f = Function('f')
>>> _atomic(x + y)
set([x, y])
>>> _atomic(x + f(y))
set([x, f(y)])
>>> _atomic(Derivative(f(x), x) + cos(x) + y)
set([y, cos(x), Derivative(f(x), x)])
"""
from sympy import Derivative, Function, Symbol
pot = preorder_traversal(e)
seen = set()
try:
free = e.free_symbols
except AttributeError:
return set([e])
atoms = set()
for p in pot:
if p in seen:
pot.skip()
continue
seen.add(p)
if isinstance(p, Symbol) and p in free:
atoms.add(p)
elif isinstance(p, (Derivative, Function)):
pot.skip()
atoms.add(p)
return atoms
class preorder_traversal(Iterator):
"""
Do a pre-order traversal of a tree.
This iterator recursively yields nodes that it has visited in a pre-order
    fashion. That is, it yields the current node and then, for each of that
    node's children in turn, recursively yields the child's own pre-order
    traversal.
For an expression, the order of the traversal depends on the order of
.args, which in many cases can be arbitrary.
Parameters
==========
node : sympy expression
The expression to traverse.
keys : (default None) sort key(s)
The key(s) used to sort args of Basic objects. When None, args of Basic
objects are processed in arbitrary order. If key is defined, it will
be passed along to ordered() as the only key(s) to use to sort the
arguments; if ``key`` is simply True then the default keys of ordered
will be used.
Yields
======
subtree : sympy expression
All of the subtrees in the tree.
Examples
========
>>> from sympy import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
The nodes are returned in the order that they are encountered unless key
is given; simply passing key=True will guarantee that the traversal is
unique.
>>> list(preorder_traversal((x + y)*z, keys=None)) # doctest: +SKIP
[z*(x + y), z, x + y, y, x]
>>> list(preorder_traversal((x + y)*z, keys=True))
[z*(x + y), z, x + y, x, y]
"""
def __init__(self, node, keys=None):
self._skip_flag = False
self._pt = self._preorder_traversal(node, keys)
def _preorder_traversal(self, node, keys):
yield node
if self._skip_flag:
self._skip_flag = False
return
if isinstance(node, Basic):
if not keys and hasattr(node, '_argset'):
# LatticeOp keeps args as a set. We should use this if we
# don't care about the order, to prevent unnecessary sorting.
args = node._argset
else:
args = node.args
if keys:
if keys != True:
args = ordered(args, keys, default=False)
else:
args = ordered(args)
for arg in args:
for subtree in self._preorder_traversal(arg, keys):
yield subtree
elif iterable(node):
for item in node:
for subtree in self._preorder_traversal(item, keys):
yield subtree
def skip(self):
"""
Skip yielding current node's (last yielded node's) subtrees.
Examples
        ========
>>> from sympy.core import symbols
>>> from sympy.core.basic import preorder_traversal
>>> x, y, z = symbols('x y z')
>>> pt = preorder_traversal((x+y*z)*z)
>>> for i in pt:
... print(i)
... if i == x+y*z:
... pt.skip()
z*(x + y*z)
z
x + y*z
"""
self._skip_flag = True
def __next__(self):
return next(self._pt)
def __iter__(self):
return self
def _make_find_query(query):
"""Convert the argument of Basic.find() into a callable"""
try:
query = sympify(query)
except SympifyError:
pass
if isinstance(query, type):
return lambda expr: isinstance(expr, query)
elif isinstance(query, Basic):
return lambda expr: expr.match(query) is not None
return query
| bsd-3-clause | -1,768,743,844,078,496,000 | 31.347283 | 94 | 0.521783 | false |
matt-desmarais/P0Wcrosshair | Book.py | 1 | 1837 | addressbook = {
'MAG_ADDRESS': 0x1E,
'ACC_ADDRESS': 0x1E,
'GYR_ADDRESS' : 0x6A,
#LSM9DS0 Gyro Registers
'WHO_AM_I_G': 0x0F,
'CTRL_REG1_G': 0x20,
'CTRL_REG2_G': 0x21,
'CTRL_REG3_G': 0x22,
'CTRL_REG4_G': 0x23,
'CTRL_REG5_G': 0x24,
'REFERENCE_G': 0x25,
'STATUS_REG_G': 0x27,
'OUT_X_L_G': 0x28,
'OUT_X_H_G': 0x29,
'OUT_Y_L_G': 0x2A,
'OUT_Y_H_G': 0x2B,
'OUT_Z_L_G': 0x2C,
'OUT_Z_H_G': 0x2D,
'FIFO_CTRL_REG_G': 0x2E,
'FIFO_SRC_REG_G': 0x2F,
'INT1_CFG_G': 0x30 ,
'INT1_SRC_G' : 0x31 ,
'INT1_THS_XH_G': 0x32,
'INT1_THS_XL_G': 0x33,
'INT1_THS_YH_G': 0x34,
'INT1_THS_YL_G': 0x35,
'INT1_THS_ZH_G': 0x36,
'INT1_THS_ZL_G': 0x37,
'INT1_DURATION_G': 0x38,
#LSM9DS0 Accel and Magneto Registers
'OUT_TEMP_L_XM': 0x05,
'OUT_TEMP_H_XM': 0x06,
'STATUS_REG_M': 0x07,
'OUT_X_L_M': 0x08,
'OUT_X_H_M': 0x09,
'OUT_Y_L_M': 0x0A,
'OUT_Y_H_M': 0x0B,
'OUT_Z_L_M': 0x0C,
'OUT_Z_H_M': 0x0D,
'WHO_AM_I_XM': 0x0F,
'INT_CTRL_REG_M': 0x12,
'INT_SRC_REG_M': 0x13,
'INT_THS_L_M': 0x14,
'INT_THS_H_M': 0x15,
'OFFSET_X_L_M': 0x16,
'OFFSET_X_H_M': 0x17,
'OFFSET_Y_L_M': 0x18,
'OFFSET_Y_H_M': 0x19,
'OFFSET_Z_L_M': 0x1A,
'OFFSET_Z_H_M': 0x1B,
'REFERENCE_X': 0x1C,
'REFERENCE_Y': 0x1D,
'REFERENCE_Z': 0x1E,
'CTRL_REG0_XM': 0x1F,
'CTRL_REG1_XM': 0x20,
'CTRL_REG2_XM': 0x21,
'CTRL_REG3_XM': 0x22,
'CTRL_REG4_XM': 0x23,
'CTRL_REG5_XM': 0x24,
'CTRL_REG6_XM': 0x25,
'CTRL_REG7_XM': 0x26,
'STATUS_REG_A': 0x27,
'OUT_X_L_A': 0x28,
'OUT_X_H_A': 0x29,
'OUT_Y_L_A': 0x2A,
'OUT_Y_H_A': 0x2B,
'OUT_Z_L_A': 0x2C,
'OUT_Z_H_A': 0x2D,
'FIFO_CTRL_REG': 0x2E,
'FIFO_SRC_REG': 0x2F,
'INT_GEN_1_REG': 0x30,
'INT_GEN_1_SRC': 0x31,
'INT_GEN_1_THS': 0x32,
'INT_GEN_1_DURATION': 0x33,
'INT_GEN_2_REG': 0x34,
'INT_GEN_2_SRC': 0x35,
'INT_GEN_2_THS': 0x36,
'INT_GEN_2_DURATION': 0x37,
'CLICK_CFG': 0x38,
'CLICK_SRC': 0x39,
'CLICK_THS': 0x3A,
'TIME_LIMIT': 0x3B,
'TIME_LATENCY': 0x3C,
'TIME_WINDOW': 0x3D}
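# --- Illustrative usage sketch (added for exposition; not part of the original file) ---
# The map above holds LSM9DS0 I2C register addresses; a typical read with the
# 'smbus' library (bus number 1 assumed here) would look roughly like:
#     import smbus
#     bus = smbus.SMBus(1)
#     whoami = bus.read_byte_data(addressbook['GYR_ADDRESS'], addressbook['WHO_AM_I_G'])
#     print(hex(whoami))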
| mit | -1,842,013,110,187,638,800 | 20.611765 | 36 | 0.612956 | false |
samnashi/howdoflawsgetlonger | Conv1D_LSTM_AuxRegressor.py | 1 | 20107 | from __future__ import print_function
import numpy as np
from random import shuffle
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import pickle as pkl
from keras.models import Sequential, Model, load_model
from keras.utils import plot_model
from keras.layers import Dense, LSTM, GRU, Flatten, Input, Reshape, TimeDistributed, Bidirectional, Dense, Dropout, \
Activation, Flatten, Conv1D, MaxPooling1D, GlobalAveragePooling1D, AveragePooling1D, concatenate, BatchNormalization
from keras.initializers import lecun_normal, glorot_normal
from keras.regularizers import l1, l1_l2, l2
from keras import metrics
from keras.optimizers import adam, rmsprop
import pandas as pd
import scipy.io as sio
from keras.callbacks import CSVLogger, TerminateOnNaN
import os
import csv
import json
import scattergro_utils as sg_utils
import sklearn.preprocessing
#import xgboost as xgb
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error, explained_variance_score, \
r2_score
from sklearn.kernel_ridge import KernelRidge
import time
from sklearn.externals import joblib
from sklearn.multioutput import MultiOutputRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.linear_model import ElasticNet
from Conv1D_LSTM_Ensemble import pair_generator_1dconv_lstm_bagged
from AuxRegressor import create_training_set,create_testing_set,create_model_list,generate_model_id
'''This loads a saved Keras model and uses it as a feature extractor, which then feeds into Scikit-learn multivariate regressors.
The generators created need to match what the Keras model asks for.'''
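# --- Illustrative sketch (added for exposition; not part of the original script) ---
# The pattern used below: tap an intermediate layer of the trained Keras model and
# flatten its (steps, batch, units) output to 2D so scikit-learn regressors can fit it.
# 'dense_post_concat' matches the layer name tapped later in this script; treat the
# helper name and defaults as assumptions.
def _extract_features_sketch(keras_model, generator, steps, layer_name='dense_post_concat'):
    """Return a 2D (samples x features) array of intermediate-layer activations."""
    from keras.models import Model
    import numpy as np
    extractor = Model(inputs=keras_model.input,
                      outputs=keras_model.get_layer(name=layer_name).output)
    feats = extractor.predict_generator(generator, steps=steps)  # shape: (steps, batch, units)
    return np.reshape(feats, (feats.shape[0] * feats.shape[1], feats.shape[2]))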
# @@@@@@@@@@@@@@ RELATIVE PATHS @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
Base_Path = "./"
image_path = "./images/"
train_path = "./train/"
test_path = "./test/"
analysis_path = "./analysis/"
models_path = analysis_path + "models_to_load/"
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
if __name__ == "__main__":
num_sequence_draws = 25
GENERATOR_BATCH_SIZE = 128
    num_epochs = 1  # the base model must be fed exactly as it was fed during training (recurrent models need the same batch layout)
save_preds = False
#create the data-pair filenames (using zip), use the helper methods
train_set_filenames = create_training_set()
test_set_filenames = create_testing_set()
model_filenames = create_model_list()
print(train_set_filenames)
print(test_set_filenames)
print(model_filenames)
# load model
#identifier shouldn't have a leading underscore!
#TODO!!!!! REVERT BACK
for model in model_filenames:
identifier_post_training = model
#identifier_post_training = "bag_conv_lstm_dense_tiny_shufstart_softplus_ca_tanh_da_3_cbd_standard_per_batch_sclr_l1l2_kr_HLR.h5"
# './' + identifier_post_training + '.h5'
raw_base_model = load_model(models_path + model)
time_dict = {}
#switch to load item in that model_filenames list.
print("using: {} as model".format(model))
label_scaler_aux_regressor = StandardScaler()
train_start_time = time.clock()
tree_regressor_check_cond = False
for i in range(0, num_sequence_draws):
index_to_load = np.random.randint(0, len(train_set_filenames)) # switch to iterations
files = train_set_filenames[index_to_load]
print("files: {}".format(files))
data_load_path = train_path + '/data/' + files[0]
label_load_path = train_path + '/label/' + files[1]
# print("data/label load path: {} \n {}".format(data_load_path,label_load_path))
train_array = np.load(data_load_path)
label_array = np.load(label_load_path)[:, 1:]
if train_array.shape[1] != 11:
train_array = train_array[:, 1:]
print("data/label shape: {}, {}, draw #: {}".format(train_array.shape, label_array.shape, i))
aux_reg_train_generator = pair_generator_1dconv_lstm_bagged(
train_array, label_array, start_at=0, generator_batch_size=GENERATOR_BATCH_SIZE,use_precomputed_coeffs=False,
scaled=True, scaler_type = 'standard_per_batch',no_labels=True)
num_generator_yields = train_array.shape[0]//GENERATOR_BATCH_SIZE
base_model = Model(inputs=raw_base_model.input, outputs=raw_base_model.get_layer(name='dense_post_concat').output)
base_model_output = base_model.predict_generator(aux_reg_train_generator,
steps=num_generator_yields)
#print(type(base_model_output))
print("base model output shape: {}".format(base_model_output.shape)) #1,128,64 (if steps=1) or
# num_generator_yields,GENERATOR_BATCH_SIZE, num of neurons in dense_after_concat
base_model_output_2d_shape = (base_model_output.shape[0] * base_model_output.shape[1], base_model_output.shape[2])
base_model_output_2d = np.zeros(shape=base_model_output_2d_shape)
for reshape_counter in range(0,base_model_output.shape[0]):
base_model_output_2d[reshape_counter:reshape_counter + GENERATOR_BATCH_SIZE, :] = np.reshape(
base_model_output[reshape_counter,:,:],newshape=(GENERATOR_BATCH_SIZE,base_model_output.shape[2])) #1,128,64 to 128,64
reshape_counter += 1
print("pretrained net's output shape: {}".format(base_model_output_2d.shape))
#batch-scale the target array. per batch size.
batch_scaled_labels = np.zeros(shape=(label_array.shape))
for label_batch_scaler_counter in range(0,label_array.shape[0]): #how many batches there are
batch_scaled_labels[label_batch_scaler_counter:label_batch_scaler_counter+GENERATOR_BATCH_SIZE,:] = \
label_scaler_aux_regressor.fit_transform(
label_array[label_batch_scaler_counter:label_batch_scaler_counter+GENERATOR_BATCH_SIZE,:])
label_batch_scaler_counter += GENERATOR_BATCH_SIZE
label_array_to_fit = batch_scaled_labels[0:base_model_output_2d.shape[0],:]
#data_dmatrix = xgb.DMatrix(data=base_model_output_dmatrix) #input to the DMatrix has to be 2D. default is 3D.
#label_array_reshaped = np.reshape(label_array,newshape=()) #reshape to what? it needed reshaping for the Keras LSTM.
#label_dmatrix = xgb.DMatrix(data=label_array) #forget trying to get it through the generator.
#print(type(base_model_output_2d), type(label_array))
print("for fitting: feature shape: {}, uncut label shape: {}".format(base_model_output_2d.shape, label_array.shape))
if i == 0: #initialize for the first time
aux_reg_regressor = Ridge(solver='saga')
#aux_reg_regressor = LinearRegression()
#aux_reg_regressor = KernelRidge(alpha=1,kernel='polynomial',gamma=1.0e-3,)
#aux_reg_regressor = ExtraTreesRegressor(n_estimators=5,criterion='mse',n_jobs=2,warm_start=True)
#aux_reg_regressor = RandomForestRegressor(n_estimators=5,criterion='mse',n_jobs=-1,warm_start=True,oob_score=False)
#aux_reg_regressor = MultiOutputRegressor(estimator = GaussianProcessRegressor(random_state=1337),n_jobs=1) #MEMORY ERROR
#aux_reg_regressor = MultiOutputRegressor(estimator=ElasticNet(warm_start=True),n_jobs=1)
model_id = generate_model_id(aux_reg_regressor)
assert model_id != ""
print("model id is: ", model_id)
print("fitting regressor..")
if isinstance(aux_reg_regressor,MultiOutputRegressor) == True:
aux_reg_regressor.fit(X=base_model_output_2d, y=label_array_to_fit) #should be partial fit
else:
aux_reg_regressor.fit(X=base_model_output_2d, y=label_array_to_fit)
if isinstance(aux_reg_regressor,ExtraTreesRegressor) or isinstance(aux_reg_regressor,RandomForestRegressor):
tree_regressor_check_cond = True
if not isinstance(aux_reg_regressor,ExtraTreesRegressor) and not isinstance(aux_reg_regressor,RandomForestRegressor):
tree_regressor_check_cond = False
if i != 0:
#aux_reg_regressor = aux_reg_regressor_cached
label_array_to_fit = label_scaler_aux_regressor.fit_transform(label_array[0:base_model_output_2d.shape[0],:])
print("fitting regressor..")
if tree_regressor_check_cond == True:
print("feat_imp before fitting: {}".format(aux_reg_regressor.feature_importances_))
if isinstance(aux_reg_regressor,MultiOutputRegressor) == True:
aux_reg_regressor.fit(X=base_model_output_2d, y=label_array_to_fit)
else:
aux_reg_regressor.fit(X=base_model_output_2d, y=label_array_to_fit)
if tree_regressor_check_cond == True:
print("feat_imp after fitting: {}".format(aux_reg_regressor.feature_importances_))
# aux_reg_regressor_cached = aux_reg_regressor.fit(X=base_model_output_2d,y=label_array_to_fit)
#assert aux_reg_regressor_cached.feature_importances_ != aux_reg_regressor.feature_importances_
#aux_reg_regressor = aux_reg_regressor_cached
if tree_regressor_check_cond == True:
print("feat-imp: {}, estimators: {}, estimator params: {} ".format(
aux_reg_regressor.feature_importances_,aux_reg_regressor.estimators_,aux_reg_regressor.estimator_params))
train_end_time = time.clock()
train_time_elapsed = train_end_time - train_start_time
print("training time elapsed: {}".format(train_time_elapsed))
time_dict['train'] = train_time_elapsed
print("TESTING PHASE")
data_filenames = list(set(os.listdir(test_path + "data")))
# print("before sorting, data_filenames: {}".format(data_filenames))
data_filenames.sort()
# print("after sorting, data_filenames: {}".format(data_filenames))
label_filenames = list(set(os.listdir(test_path + "label")))
label_filenames.sort()
# print("label_filenames: {}".format(data_filenames))
assert len(data_filenames) == len(label_filenames)
combined_filenames = zip(data_filenames, label_filenames)
# print("before shuffling: {}".format(combined_filenames))
shuffle(combined_filenames)
print("after shuffling: {}".format(combined_filenames)) # shuffling works ok.
i = 0
score_rows_list = []
score_rows_list_scikit_raw = []
scores_dict = {}
mse_dict = {}
mse_dict_raw = {}
mae_dict = {}
mae_dict_raw = {}
mape_dict = {}
scores_dict_f3 = {}
mse_dict_f3 = {}
mse_dict_f3_raw = {}
mae_dict_f3 = {}
mae_dict_f3_raw = {}
mape_dict_f3 = {}
test_start_time = time.clock()
getparams_dict = aux_reg_regressor.get_params(deep=True)
print("getparams_dict: ", getparams_dict)
getparams_df = pd.DataFrame.from_dict(data=getparams_dict,orient='index')
getparams_df.to_csv(analysis_path + model_id + str(model)[:-4] + "getparams.csv")
model_as_pkl_filename = analysis_path + model_id + str(model)[:-4] +".pkl"
joblib.dump(aux_reg_regressor,filename=model_as_pkl_filename)
#np.savetxt(analysis_path + "rf5getparams.txt",fmt='%s',X=str(aux_reg_regressor.get_params(deep=True)))
#np.savetxt(analysis_path + "rf5estimatorparams.txt",fmt='%s',X=aux_reg_regressor.estimator_params) USELESS
#np.savetxt(analysis_path + "rf5classes.txt",fmt='%s',X=aux_reg_regressor.classes_)
#np.savetxt(analysis_path + "rf5baseestim.txt",fmt='%s',X=aux_reg_regressor.base_estimator_)
#TODO: CHANGE THIS BACK IF CUT SHORT!!
for files in combined_filenames:
print("filename", files)
i += 1
data_load_path = test_path + '/data/' + files[0]
label_load_path = test_path + '/label/' + files[1]
# print("data/label load path: {} \n {}".format(data_load_path,label_load_path))
test_array = np.load(data_load_path)
test_label_array = np.load(label_load_path)[:, 1:]
# --------COMMENTED OUT BECAUSE OF SCALER IN THE GENERATOR-----------------------------------
# test_array = np.reshape(test_array, (1, test_array.shape[0], test_array.shape[1]))
# label_array = np.reshape(label_array,(1,label_array.shape[0],label_array.shape[1])) #label doesn't need to be 3D
# print("file: {} data/label shape: {}, {}".format(files[0],test_array.shape, label_array.shape))
print(files[0])
# print("Metrics: {}".format(model.metrics_names))
# steps per epoch is how many times that generator is called
test_generator = pair_generator_1dconv_lstm_bagged(
test_array, test_label_array, start_at=0, generator_batch_size=GENERATOR_BATCH_SIZE,
use_precomputed_coeffs=False,
scaled=True, scaler_type='standard_per_batch',no_labels=True)
num_generator_yields = test_array.shape[0]//GENERATOR_BATCH_SIZE
base_model_output_test = base_model.predict_generator(test_generator,
steps=num_generator_yields)
base_model_output_2d_test_shape = (base_model_output_test.shape[0] * base_model_output_test.shape[1], base_model_output_test.shape[2])
base_model_output_2d_test = np.zeros(shape=base_model_output_2d_test_shape)
reshape_counter_test=0
for reshape_counter_test in range(0,base_model_output_test.shape[0]):
base_model_output_2d_test[reshape_counter_test:reshape_counter_test + GENERATOR_BATCH_SIZE, :] = np.reshape(
base_model_output_test[reshape_counter_test,:,:],newshape=(GENERATOR_BATCH_SIZE,base_model_output_test.shape[2])) #1,128,64 to 128,64
reshape_counter_test += 1
batch_scaled_test_labels = np.zeros(shape=(test_label_array.shape))
for label_batch_scaler_counter in range(0,base_model_output_test.shape[0]): #how many batches there are
batch_scaled_test_labels[label_batch_scaler_counter:label_batch_scaler_counter+GENERATOR_BATCH_SIZE,:] = \
label_scaler_aux_regressor.fit_transform(
test_label_array[label_batch_scaler_counter:label_batch_scaler_counter+GENERATOR_BATCH_SIZE,:])
label_batch_scaler_counter += GENERATOR_BATCH_SIZE
print("for fitting: feature shape: {}, uncut label shape: {}".format(base_model_output_2d_test.shape,
batch_scaled_test_labels.shape))
print("pretrained net's output shape: {}".format(base_model_output_2d_test.shape))
#tested_regressor = pkl.loads(trained_regressor)
test_label_array_to_fit = batch_scaled_test_labels[0:base_model_output_2d_test.shape[0],:]
index_last_3_batches = batch_scaled_test_labels.shape[0] - 3 * GENERATOR_BATCH_SIZE
score = aux_reg_regressor.score(X=base_model_output_2d_test,y=test_label_array_to_fit)
score_f3 = aux_reg_regressor.score(X=base_model_output_2d_test[index_last_3_batches:, :],
y=test_label_array_to_fit[index_last_3_batches:, :])
print("score: {}".format(score))
print("score_f3: {}".format(score_f3))
scores_dict[str(files[0])[:-4]] = score
scores_dict_f3[str(files[0])[:-4]] = score_f3
preds = aux_reg_regressor.predict(base_model_output_2d_test)
if save_preds == True:
#<class 'sklearn.ensemble.forest.RandomForestRegressor'>
#< class 'sklearn.ensemble.forest.ExtraTreesRegressor'>
preds_filename = analysis_path + "preds_" + model_id + "_" + str(files[0])[:-4] + "_" + str(model)[:-3]
np.save(file=preds_filename, arr=preds)
row_dict_scikit_raw = {}
mse = mean_squared_error(test_label_array_to_fit, preds)
mse_raw = mean_squared_error(test_label_array_to_fit, preds, multioutput='raw_values')
mse_f3 = mean_squared_error(test_label_array_to_fit[index_last_3_batches:, :],
preds[index_last_3_batches:, :])
mse_f3_raw = mean_squared_error(test_label_array_to_fit[index_last_3_batches:, :],
preds[index_last_3_batches:, :], multioutput='raw_values')
print("mse: {}".format(mse))
print("mse_f3: {}".format(mse_f3))
mse_dict[str(files[0])[:-4]] = mse
mse_dict_f3[str(files[0])[:-4]] = mse_f3
for flaw in range(0, len(mse_raw)):
row_dict_scikit_raw['mse_' + str(flaw)] = mse_raw[flaw]
for flaw in range(0, len(mse_f3_raw)):
row_dict_scikit_raw['mse_f3_' + str(flaw)] = mse_f3_raw[flaw]
mae = mean_absolute_error(test_label_array_to_fit, preds)
mae_raw = mean_absolute_error(test_label_array_to_fit, preds, multioutput='raw_values')
mae_f3 = mean_absolute_error(test_label_array_to_fit[index_last_3_batches:, :],
preds[index_last_3_batches:, :])
mae_f3_raw = mean_absolute_error(test_label_array_to_fit[index_last_3_batches:, :],
preds[index_last_3_batches:, :], multioutput='raw_values')
print("mae: {}".format(mae))
print("mae_f3: {}".format(mae_f3))
mae_dict[str(files[0])[:-4]] = mae
mae_dict_f3[str(files[0])[:-4]] = mae_f3
for flaw in range(0, len(mae_raw)):
row_dict_scikit_raw['mae_' + str(flaw)] = mae_raw[flaw]
for flaw in range(0, len(mae_f3_raw)):
row_dict_scikit_raw['mae_f3_' + str(flaw)] = mae_f3_raw[flaw]
score_rows_list_scikit_raw.append(row_dict_scikit_raw)
test_end_time = time.clock()
test_time_elapsed = test_end_time - test_start_time
print("test time elapsed: {}".format(test_time_elapsed))
time_dict['test_time'] = test_time_elapsed
time_df = pd.DataFrame.from_dict(time_dict,orient='index')
#time_df.rename(columns=['time'])
r2_scores_df = pd.DataFrame.from_dict(scores_dict,orient='index')
#r2_scores_df.rename(columns=['r2'])
mse_scores_df = pd.DataFrame.from_dict(mse_dict,orient='index')
#mse_scores_df.rename(columns=['mse'])
mae_scores_df = pd.DataFrame.from_dict(mae_dict,orient='index')
#mae_scores_df.rename(columns=['mae'])
name_list = ['filename','r2','mse','mae']
scores_combined_df = pd.DataFrame(pd.concat([r2_scores_df,mse_scores_df,mae_scores_df],axis=1))
scores_combined_df.columns = name_list[1:]  # r2, mse, mae (set_axis without assignment had no effect)
# time_df.to_csv("./analysis/time_rf5a_" + str(model) + ".csv")
# r2_scores_df.to_csv("./analysis/r2_rf5a_" + str(model) + ".csv")
# mse_scores_df.to_csv("./analysis/mse_rf5a_" + str(model) + ".csv")
# mae_scores_df.to_csv("./analysis/mae_rf5a_" + str(model) + ".csv")
scores_combined_df.to_csv("./analysis/combi_scores_" + model_id + "_" + str(model)[:-3] + ".csv")
#score_df = pd.DataFrame(data=score_rows_list, columns=score_rows_list[0].keys())
raw_scores_df = pd.DataFrame(data=score_rows_list_scikit_raw,columns = score_rows_list_scikit_raw[0].keys())
| gpl-3.0 | -7,266,670,127,247,519,000 | 56.778736 | 153 | 0.618591 | false |
jimi-c/ansible | lib/ansible/modules/network/aci/aci_interface_selector_to_switch_policy_leaf_profile.py | 10 | 7609 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_interface_selector_to_switch_policy_leaf_profile
short_description: Bind interface selector profiles to switch policy leaf profiles (infra:RsAccPortP)
description:
- Bind interface selector profiles to switch policy leaf profiles on Cisco ACI fabrics.
notes:
- This module requires an existing leaf profile, the module M(aci_switch_policy_leaf_profile) can be used for this.
- More information about the internal APIC class B(infra:RsAccPortP) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Bruno Calogero (@brunocalogero)
version_added: '2.5'
options:
leaf_profile:
description:
- Name of the Leaf Profile to which we add a Selector.
aliases: [ leaf_profile_name ]
interface_selector:
description:
- Name of Interface Profile Selector to be added and associated with the Leaf Profile.
aliases: [ name, interface_selector_name, interface_profile_name ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Associating an interface selector profile to a switch policy leaf profile
aci_interface_selector_to_switch_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
interface_selector: interface_profile_name
state: present
delegate_to: localhost
- name: Remove an interface selector profile associated with a switch policy leaf profile
aci_interface_selector_to_switch_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
interface_selector: interface_profile_name
state: absent
delegate_to: localhost
- name: Query an interface selector profile associated with a switch policy leaf profile
aci_interface_selector_to_switch_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_profile: sw_name
interface_selector: interface_profile_name
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
leaf_profile=dict(type='str', aliases=['leaf_profile_name']), # Not required for querying all objects
interface_selector=dict(type='str', aliases=['interface_profile_name', 'interface_selector_name', 'name']), # Not required for querying all objects
state=dict(type='str', default='present', choices=['absent', 'present', 'query'])
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['leaf_profile', 'interface_selector']],
['state', 'present', ['leaf_profile', 'interface_selector']]
],
)
leaf_profile = module.params['leaf_profile']
    # WARNING: interface_selector accepts non-existing interface_profile names; they show up in the APIC GUI with a state of "missing-target"
interface_selector = module.params['interface_selector']
state = module.params['state']
# Defining the interface profile tDn for clarity
interface_selector_tDn = 'uni/infra/accportprof-{0}'.format(interface_selector)
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='infraNodeP',
aci_rn='infra/nprof-{0}'.format(leaf_profile),
module_object=leaf_profile,
target_filter={'name': leaf_profile},
),
subclass_1=dict(
aci_class='infraRsAccPortP',
aci_rn='rsaccPortP-[{0}]'.format(interface_selector_tDn),
module_object=interface_selector,
target_filter={'name': interface_selector},
)
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraRsAccPortP',
class_config=dict(tDn=interface_selector_tDn),
)
aci.get_diff(aci_class='infraRsAccPortP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 | 5,042,319,242,676,004,000 | 29.805668 | 156 | 0.635957 | false |
fp7-netide/Usecases | Usecase1/POX/Application/basic_bridge.py | 1 | 2013 | from pox.core import core
import pox.openflow.libopenflow_01 as of
ETH_ARP = 0x0806
ETH_IP = 0x0800
# Lifetime of rules, in seconds.
FLOW_LIFE = 30
"""
Everything received at one port is forwarded through the other.
The bridge learns the source MAC address of the ARP traffic that comes through
each port, so it can filter out messages that are looping.
knownMACs["srcMAC"] = switch_port
"""
class BasicBridge(object):
def __init__(self, connection):
self.connection = connection
self.knownMACs = {}
self.connection.addListeners(self)
def installFlowForwarding(self, event, packet, outport, duration = 0):
msg = of.ofp_flow_mod()
msg.match = of.ofp_match.from_packet(packet, event.port)
msg.actions.append(of.ofp_action_output(port = outport))
msg.data = event.ofp
if duration > 0:
msg.hard_timeout = duration
msg.idle_timeout = duration
self.connection.send(msg)
def drop(self, event):
# Does not install a rule. Just drops this packet.
if event.ofp.buffer_id is not None:
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
self.connection.send(msg)
def forwardPacket(self, event, outPort):
# Does not install a rule. Just forwards this packet.
if event.ofp.buffer_id is not None:
msg = of.ofp_packet_out()
msg.actions.append(of.ofp_action_output(port = outPort))
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
self.connection.send(msg)
def _handle_PacketIn(self, event):
packet = event.parsed
outPort = 1 if event.port == 2 else 2
forwarded = False
if packet.type == ETH_IP:
self.installFlowForwarding(event, packet, outPort, FLOW_LIFE)
forwarded = True
elif packet.type == ETH_ARP:
srcMac = packet.src.toStr()
try:
port = self.knownMACs[srcMac]
if port == event.port:
self.forwardPacket(event, outPort)
forwarded = True
except KeyError:
self.knownMACs[srcMac] = event.port
self.forwardPacket(event, outPort)
forwarded = True
if not forwarded:
self.drop(event)
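# Illustrative sketch, not part of the original file: POX components are
# normally started from a module-level launch() that registers a handler for
# new switch connections. The handler name and wiring below are assumptions;
# only the BasicBridge class above comes from the original source.
def launch():
    def _handle_ConnectionUp(event):
        # create one BasicBridge per connected datapath
        BasicBridge(event.connection)
    core.openflow.addListenerByName("ConnectionUp", _handle_ConnectionUp)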
| epl-1.0 | 4,620,845,121,418,771,000 | 26.202703 | 78 | 0.700944 | false |
nikitanovosibirsk/jj | tests/logs/test_simple_formatter.py | 1 | 1188 | from unittest.mock import Mock, sentinel
import pytest
from jj.logs import SimpleFormatter
from .._test_utils.steps import given, then, when
from ._log_record import TestLogRecord
@pytest.fixture()
def formatter():
return SimpleFormatter()
@pytest.fixture()
def record():
return TestLogRecord(sentinel.message)
def test_format_without_request_and_response(formatter: SimpleFormatter, record: TestLogRecord):
with when:
res = formatter.format(record)
with then:
assert res == str(sentinel.message)
def test_format_with_request(formatter: SimpleFormatter, record: TestLogRecord):
with given:
record.jj_request = Mock(url=Mock(path=sentinel.path))
with when:
res = formatter.format(record)
with then:
assert res == "-> {}".format(sentinel.path)
def test_format_with_response(formatter: SimpleFormatter, record: TestLogRecord):
with given:
record.jj_request = Mock()
record.jj_response = Mock(status=sentinel.status, reason=sentinel.reason)
with when:
res = formatter.format(record)
with then:
assert res == "<- {} {}\n".format(sentinel.status, sentinel.reason)
| apache-2.0 | -1,510,866,519,037,399,600 | 23.244898 | 96 | 0.691077 | false |
steveb/paunch | paunch/tests/test_builder_compose1.py | 1 | 7561 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from paunch.builder import compose1
from paunch.tests import base
class TestComposeV1Builder(base.TestCase):
@mock.patch('paunch.runner.DockerRunner', autospec=True)
def test_apply(self, runner):
config = {
'one': {
'start_order': 0,
'image': 'centos:7',
},
'two': {
'start_order': 1,
'image': 'centos:7',
},
'three': {
'start_order': 2,
'image': 'centos:7',
},
'four': {
'start_order': 10,
'image': 'centos:7',
},
'four_ls': {
'action': 'exec',
'start_order': 20,
'command': ['four', 'ls', '-l', '/']
}
}
r = runner.return_value
r.managed_by = 'tester'
r.discover_container_name = lambda n, c: '%s-12345678' % n
r.unique_container_name = lambda n: '%s-12345678' % n
r.docker_cmd = 'docker'
r.execute.return_value = ('Done!', '', 0)
builder = compose1.ComposeV1Builder('foo', config, r)
stdout, stderr, deploy_status_code = builder.apply()
self.assertEqual(0, deploy_status_code)
self.assertEqual(['Done!', 'Done!', 'Done!', 'Done!', 'Done!'], stdout)
self.assertEqual([], stderr)
r.execute.assert_has_calls([
mock.call(
['docker', 'run', '--name', 'one-12345678',
'--label', 'config_id=foo',
'--label', 'container_name=one',
'--label', 'managed_by=tester',
'--detach=true', 'centos:7']
),
mock.call(
['docker', 'run', '--name', 'two-12345678',
'--label', 'config_id=foo',
'--label', 'container_name=two',
'--label', 'managed_by=tester',
'--detach=true', 'centos:7']
),
mock.call(
['docker', 'run', '--name', 'three-12345678',
'--label', 'config_id=foo',
'--label', 'container_name=three',
'--label', 'managed_by=tester',
'--detach=true', 'centos:7']
),
mock.call(
['docker', 'run', '--name', 'four-12345678',
'--label', 'config_id=foo',
'--label', 'container_name=four',
'--label', 'managed_by=tester',
'--detach=true', 'centos:7']
),
mock.call(
['docker', 'exec', 'four-12345678', 'ls', '-l', '/']
),
])
@mock.patch('paunch.runner.DockerRunner', autospec=True)
def test_label_arguments(self, runner):
r = runner.return_value
r.managed_by = 'tester'
builder = compose1.ComposeV1Builder('foo', {}, r)
cmd = []
builder.label_arguments(cmd, 'one')
self.assertEqual(
['--label', 'config_id=foo',
'--label', 'container_name=one',
'--label', 'managed_by=tester'],
cmd)
labels = {
'foo': 'bar',
'bar': 'baz'
}
builder = compose1.ComposeV1Builder('foo', {}, r, labels=labels)
cmd = []
builder.label_arguments(cmd, 'one')
self.assertEqual(
['--label', 'foo=bar',
'--label', 'bar=baz',
'--label', 'config_id=foo',
'--label', 'container_name=one',
'--label', 'managed_by=tester'],
cmd)
def test_docker_run_args(self):
config = {
'one': {
'image': 'centos:7',
'privileged': True,
'user': 'bar',
'net': 'host',
'pid': 'container:bar',
'restart': 'always',
'env_file': '/tmp/foo.env',
}
}
builder = compose1.ComposeV1Builder('foo', config, None)
cmd = ['docker', 'run', '--name', 'one']
builder.docker_run_args(cmd, 'one')
self.assertEqual(
['docker', 'run', '--name', 'one',
'--detach=true', '--env-file=/tmp/foo.env',
'--net=host', '--pid=container:bar',
'--privileged=true', '--restart=always', '--user=bar',
'centos:7'],
cmd
)
def test_docker_run_args_lists(self):
config = {
'one': {
'image': 'centos:7',
'detach': False,
'command': 'ls -l /foo',
'environment': ['FOO=BAR', 'BAR=BAZ'],
'env_file': ['/tmp/foo.env', '/tmp/bar.env'],
'volumes': ['/foo:/foo:rw', '/bar:/bar:ro'],
'volumes_from': ['two', 'three']
}
}
builder = compose1.ComposeV1Builder('foo', config, None)
cmd = ['docker', 'run', '--name', 'one']
builder.docker_run_args(cmd, 'one')
self.assertEqual(
['docker', 'run', '--name', 'one',
'--env-file=/tmp/foo.env', '--env-file=/tmp/bar.env',
'--env=FOO=BAR', '--env=BAR=BAZ',
'--volume=/foo:/foo:rw', '--volume=/bar:/bar:ro',
'--volumes-from=two', '--volumes-from=three',
'centos:7', 'ls', '-l', '/foo'],
cmd
)
@mock.patch('paunch.runner.DockerRunner', autospec=True)
def test_docker_exec_args(self, runner):
r = runner.return_value
r.discover_container_name.return_value = 'one-12345678'
config = {
'one': {
'command': 'ls -l /foo',
'privileged': True,
'user': 'bar'
}
}
self.builder = compose1.ComposeV1Builder(
'foo', config, runner.return_value)
cmd = ['docker', 'exec']
self.builder.docker_exec_args(cmd, 'one')
self.assertEqual(
['docker', 'exec',
'--privileged=true', '--user=bar',
'one-12345678', '-l', '/foo'],
cmd
)
def test_command_argument(self):
b = compose1.ComposeV1Builder
self.assertEqual([], b.command_argument(None))
self.assertEqual([], b.command_argument(''))
self.assertEqual([], b.command_argument([]))
self.assertEqual(
['ls', '-l', '/foo-bar'],
b.command_argument(['ls', '-l', '/foo-bar'])
)
self.assertEqual(
['ls', '-l', '/foo-bar'],
b.command_argument('ls -l /foo-bar')
)
self.assertEqual(
['ls', '-l', '/foo bar'],
b.command_argument(['ls', '-l', '/foo bar'])
)
# don't expect quoted spaces to do the right thing
self.assertEqual(
['ls', '-l', '"/foo', 'bar"'],
b.command_argument('ls -l "/foo bar"')
)
| apache-2.0 | -3,199,082,565,730,056,700 | 33.525114 | 79 | 0.459595 | false |
gangadharkadam/vlinkerp | erpnext/accounts/doctype/pos_profile/pos_profile.py | 23 | 2364 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import cint
from frappe.model.document import Document
class POSProfile(Document):
def validate(self):
self.check_for_duplicate()
self.validate_expense_account()
self.validate_all_link_fields()
def check_for_duplicate(self):
res = frappe.db.sql("""select name, user from `tabPOS Profile`
where ifnull(user, '') = %s and name != %s and company = %s""",
(self.user, self.name, self.company))
if res:
if res[0][1]:
msgprint(_("POS Profile {0} already created for user: {1} and company {2}").format(res[0][0],
res[0][1], self.company), raise_exception=1)
else:
msgprint(_("Global POS Profile {0} already created for company {1}").format(res[0][0],
self.company), raise_exception=1)
def validate_expense_account(self):
if cint(frappe.defaults.get_global_default("auto_accounting_for_stock")) \
and not self.expense_account:
msgprint(_("Expense Account is mandatory"), raise_exception=1)
def validate_all_link_fields(self):
accounts = {"Account": [self.cash_bank_account, self.income_account,
self.expense_account], "Cost Center": [self.cost_center],
"Warehouse": [self.warehouse]}
for link_dt, dn_list in accounts.items():
for link_dn in dn_list:
if link_dn and not frappe.db.exists({"doctype": link_dt,
"company": self.company, "name": link_dn}):
frappe.throw(_("{0} does not belong to Company {1}").format(link_dn, self.company))
def on_update(self):
self.set_defaults()
def on_trash(self):
self.set_defaults(include_current_pos=False)
def set_defaults(self, include_current_pos=True):
frappe.defaults.clear_default("is_pos")
if not include_current_pos:
condition = " where name != '%s'" % self.name.replace("'", "\'")
else:
condition = ""
pos_view_users = frappe.db.sql_list("""select user
from `tabPOS Profile` {0}""".format(condition))
for user in pos_view_users:
if user:
frappe.defaults.set_user_default("is_pos", 1, user)
else:
frappe.defaults.set_global_default("is_pos", 1)
@frappe.whitelist()
def get_series():
return frappe.get_meta("Sales Invoice").get_field("naming_series").options or ""
| agpl-3.0 | -314,239,003,495,290,240 | 32.771429 | 97 | 0.686548 | false |
ro0lz/Stino | stino/pyarduino/base/i18n.py | 14 | 2332 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# Documents
#
"""
Documents
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import glob
from . import deco
from . import sys_info
from . import language_file
from . import settings
@deco.singleton
class I18N(object):
def __init__(self):
self.load()
def load(self):
self.lang_params = {}
self.lang_ids = []
self.id_path_dict = {}
self.trans_dict = {}
self.list_ids()
self.settings = settings.get_arduino_settings()
self.lang_id = self.settings.get(
'lang_id', sys_info.get_sys_language())
self.change_lang(self.lang_id)
def list_ids(self):
self.lang_params = settings.get_user_settings(
'language.stino-settings')
preset_paths = [settings.get_preset_path(),
settings.get_user_preset_path()]
for preset_path in preset_paths:
lang_file_paths = glob.glob(preset_path + '/lang_*.txt')
            lang_file_names = [os.path.basename(p) for p in lang_file_paths]
            new_lang_ids = [name[5:-4] for name in lang_file_names]
            self.lang_ids += new_lang_ids
            self.id_path_dict.update(dict(zip(new_lang_ids, lang_file_paths)))
        self.lang_ids.sort(
            key=lambda _id: self.lang_params.get(_id, ['Unknown', 'Unknown'])[1])
def change_lang(self, lang_id):
if lang_id in self.id_path_dict:
self.lang_id = lang_id
lang_file_path = self.id_path_dict[lang_id]
lang_file = language_file.LanguageFile(lang_file_path)
self.trans_dict = lang_file.get_trans_dict()
else:
self.lang_id = 'en'
self.trans_dict = {}
self.settings.set('lang_id', self.lang_id)
def translate(self, text, *params):
trans_text = self.trans_dict.get(text, text)
for seq, param in enumerate(params):
seq_text = '{%d}' % seq
trans_text = trans_text.replace(seq_text, str(param))
return trans_text
def get_lang_id(self):
return self.lang_id
def get_lang_ids(self):
return self.lang_ids
def get_lang_names(self, lang_id):
return self.lang_params.get(lang_id, ['Unknown', 'Unknown'])
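# Illustrative usage, not part of the original file (names are hypothetical):
#     i18n = I18N()  # singleton via @deco.singleton
#     msg = i18n.translate('Uploading {0}...', sketch_name)
# translate() substitutes '{0}', '{1}', ... with the given parameters after
# looking the text up in the active language's translation dictionary.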
| mit | -2,367,766,367,153,444,000 | 28.518987 | 79 | 0.590051 | false |
OpenMOOC/moocng | moocng/courses/security.py | 1 | 5074 | # -*- coding: utf-8 -*-
# Copyright 2012-2013 UNED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import date
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from moocng.courses.models import Course, Unit, CourseTeacher
from moocng.http import Http410
def can_user_view_course(course, user):
"""
Returns a pair where the first element is a bool indicating if the user
can view the course and the second one is a string code explaining the
reason.
    :returns: (bool, str) tuple with the decision and a reason code
.. versionadded:: 0.1
"""
if course.is_active:
return True, 'active'
if user.is_superuser:
return True, 'is_superuser'
if user.is_staff:
return True, 'is_staff'
# check if the user is a teacher of the course
if not user.is_anonymous():
try:
CourseTeacher.objects.get(teacher=user, course=course)
return True, 'is_teacher'
except CourseTeacher.DoesNotExist:
pass
# at this point you don't have permissions to see a course
if course.is_public:
return False, 'not_active_outdated'
return False, 'not_active_yet'
def check_user_can_view_course(course, request):
"""
    Raise an Http404 or Http410 error if the user can't see the course;
    otherwise attach a warning message when access is granted for a special
    reason (staff, superuser or teacher).
    :returns: None (raises Http404/Http410 on failure)
.. versionadded:: 0.1
"""
can_view, reason = can_user_view_course(course, request.user)
if can_view:
if reason != 'active':
msg_table = {
'is_staff': _(u'This course is not public. Your have access to it because you are staff member'),
'is_superuser': _(u'This course is not public. Your have access to it because you are a super user'),
'is_teacher': _(u'This course is not public. Your have access to it because you are a teacher of the course'),
}
messages.warning(request, msg_table[reason])
else:
if reason == 'not_active_yet':
raise Http404()
else:
user = request.user
msg = _("We're sorry, but the course has finished. ")
if not user.is_anonymous():
msg += _("You could see your transcript <a href=\"%s\">here</a>") % reverse('transcript', args=(course.slug,))
raise Http410(msg)
def get_course_if_user_can_view_or_404(course_slug, request):
course = get_object_or_404(Course, slug=course_slug)
check_user_can_view_course(course, request)
return course
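# Illustrative usage, not part of the original file: a course view typically
# resolves its course with
#     course = get_course_if_user_can_view_or_404(course_slug, request)
# which raises Http404 or Http410 (see check_user_can_view_course above) when
# the course is not viewable by the requesting user.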
def get_courses_available_for_user(user):
"""
    Filter the list of courses down to those that are available to the user.
:returns: Object list
.. versionadded:: 0.1
"""
if user.is_superuser or user.is_staff:
# Return every course that hasn't finished
return Course.objects.exclude(end_date__lt=date.today())
elif user.is_anonymous() or not CourseTeacher.objects.filter(teacher=user).exists():
# Regular user, return only the published courses
return Course.objects.exclude(end_date__lt=date.today()).filter(status='p')
else:
# Is a teacher, return draft courses if he is one of its teachers
return Course.objects.exclude(end_date__lt=date.today()).filter(Q(status='p') | Q(status='d', courseteacher__teacher=user)).distinct()
def get_units_available_for_user(course, user, is_overview=False):
"""
    Filter the units of a course down to those that are available to the user.
:returns: Object list
.. versionadded:: 0.1
"""
if user.is_superuser or user.is_staff:
return course.unit_set.all()
elif user.is_anonymous():
if is_overview:
return course.unit_set.filter(Q(status='p') | Q(status='l'))
else:
return []
else:
if is_overview:
return Unit.objects.filter(
Q(status='p', course=course) |
Q(status='l', course=course) |
Q(status='d', course=course, course__courseteacher__teacher=user, course__courseteacher__course=course)).distinct()
else:
return Unit.objects.filter(
Q(status='p', course=course) |
Q(status='l', course=course, course__courseteacher__teacher=user, course__courseteacher__course=course) |
Q(status='d', course=course, course__courseteacher__teacher=user, course__courseteacher__course=course)).distinct()
| apache-2.0 | -7,393,958,337,664,639,000 | 33.753425 | 142 | 0.645447 | false |
100star/h2o | py/testdir_multi_jvm/notest_build_cloud_relaxed.py | 9 | 1284 | import unittest, time, sys
# not needed, but in case you move it down to subdir
sys.path.extend(['.','..','../..','py'])
import h2o
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
print "Test cloud building with completion = one node says desired size plus consensus=1"
print "Check is that all nodes agree on cloud size after completion rule"
pass
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_A_build_cloud_relaxed_2(self):
for trials in range(3):
h2o.init(2,java_heap_GB=1, conservative=False)
h2o.verify_cloud_size()
h2o.tear_down_cloud()
time.sleep(5)
def test_B_build_cloud_relaxed_3(self):
for trials in range(3):
h2o.init(3,java_heap_GB=1, conservative=False)
h2o.verify_cloud_size()
h2o.tear_down_cloud()
time.sleep(5)
def test_C_build_cloud_relaxed_1(self):
for trials in range(1):
h2o.init(1,java_heap_GB=1, conservative=False)
h2o.verify_cloud_size()
h2o.tear_down_cloud()
time.sleep(5)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 | 7,457,234,420,988,644,000 | 29.571429 | 97 | 0.591121 | false |
bdoner/SickRage | sickbeard/show_queue.py | 4 | 26177 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import traceback
import sickbeard
from imdb import _exceptions as imdb_exceptions
from sickbeard.common import WANTED
from sickbeard.tv import TVShow
from sickbeard import exceptions
from sickbeard import logger
from sickbeard import notifiers
from sickbeard import ui
from sickbeard import generic_queue
from sickbeard import name_cache
from sickbeard.exceptions import ex
from sickbeard.blackandwhitelist import BlackAndWhiteList
from libtrakt import TraktAPI
class ShowQueue(generic_queue.GenericQueue):
def __init__(self):
generic_queue.GenericQueue.__init__(self)
self.queue_name = "SHOWQUEUE"
def _isInQueue(self, show, actions):
return show.indexerid in [x.show.indexerid for x in self.queue if x.action_id in actions]
def _isBeingSomethinged(self, show, actions):
        return self.currentItem is not None and show == self.currentItem.show and \
self.currentItem.action_id in actions
def isInUpdateQueue(self, show):
return self._isInQueue(show, (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE))
def isInRefreshQueue(self, show):
return self._isInQueue(show, (ShowQueueActions.REFRESH,))
def isInRenameQueue(self, show):
return self._isInQueue(show, (ShowQueueActions.RENAME,))
def isInSubtitleQueue(self, show):
return self._isInQueue(show, (ShowQueueActions.SUBTITLE,))
def isBeingAdded(self, show):
return self._isBeingSomethinged(show, (ShowQueueActions.ADD,))
def isBeingUpdated(self, show):
return self._isBeingSomethinged(show, (ShowQueueActions.UPDATE, ShowQueueActions.FORCEUPDATE))
def isBeingRefreshed(self, show):
return self._isBeingSomethinged(show, (ShowQueueActions.REFRESH,))
def isBeingRenamed(self, show):
return self._isBeingSomethinged(show, (ShowQueueActions.RENAME,))
def isBeingSubtitled(self, show):
return self._isBeingSomethinged(show, (ShowQueueActions.SUBTITLE,))
def _getLoadingShowList(self):
        return [x for x in self.queue + [self.currentItem] if x is not None and x.isLoading]
loadingShowList = property(_getLoadingShowList)
def updateShow(self, show, force=False):
if self.isBeingAdded(show):
raise exceptions.CantUpdateException(
str(show.name) + u" is still being added, wait until it is finished before you update.")
if self.isBeingUpdated(show):
raise exceptions.CantUpdateException(
str(show.name) + u" is already being updated by Post-processor or manually started, can't update again until it's done.")
if self.isInUpdateQueue(show):
raise exceptions.CantUpdateException(
str(show.name) + u" is in process of being updated by Post-processor or manually started, can't update again until it's done.")
if not force:
queueItemObj = QueueItemUpdate(show)
else:
queueItemObj = QueueItemForceUpdate(show)
self.add_item(queueItemObj)
return queueItemObj
def refreshShow(self, show, force=False):
if self.isBeingRefreshed(show) and not force:
raise exceptions.CantRefreshException("This show is already being refreshed, not refreshing again.")
if (self.isBeingUpdated(show) or self.isInUpdateQueue(show)) and not force:
logger.log(
u"A refresh was attempted but there is already an update queued or in progress. Since updates do a refresh at the end anyway I'm skipping this request.",
logger.DEBUG)
return
queueItemObj = QueueItemRefresh(show, force=force)
logger.log(u"Queueing show refresh for " + show.name, logger.DEBUG)
self.add_item(queueItemObj)
return queueItemObj
def renameShowEpisodes(self, show, force=False):
queueItemObj = QueueItemRename(show)
self.add_item(queueItemObj)
return queueItemObj
def downloadSubtitles(self, show, force=False):
queueItemObj = QueueItemSubtitle(show)
self.add_item(queueItemObj)
return queueItemObj
def addShow(self, indexer, indexer_id, showDir, default_status=None, quality=None, flatten_folders=None,
lang=None, subtitles=None, anime=None, scene=None, paused=None, blacklist=None, whitelist=None, default_status_after=None):
if lang is None:
lang = sickbeard.INDEXER_DEFAULT_LANGUAGE
queueItemObj = QueueItemAdd(indexer, indexer_id, showDir, default_status, quality, flatten_folders, lang,
subtitles, anime, scene, paused, blacklist, whitelist, default_status_after)
self.add_item(queueItemObj)
return queueItemObj
def removeShow(self, show, full=False):
        if self._isInQueue(show, (ShowQueueActions.REMOVE,)):
raise sickbeard.exceptions.CantRemoveException("This show is already queued to be removed")
# remove other queued actions for this show.
for x in self.queue:
if show.indexerid == x.show.indexerid and x != self.currentItem:
self.queue.remove(x)
queueItemObj = QueueItemRemove(show=show, full=full)
self.add_item(queueItemObj)
return queueItemObj
class ShowQueueActions:
REFRESH = 1
ADD = 2
UPDATE = 3
FORCEUPDATE = 4
RENAME = 5
SUBTITLE = 6
REMOVE = 7
names = {REFRESH: 'Refresh',
ADD: 'Add',
UPDATE: 'Update',
FORCEUPDATE: 'Force Update',
RENAME: 'Rename',
SUBTITLE: 'Subtitle',
REMOVE: 'Remove Show'
}
class ShowQueueItem(generic_queue.QueueItem):
"""
Represents an item in the queue waiting to be executed
Can be either:
- show being added (may or may not be associated with a show object)
- show being refreshed
- show being updated
- show being force updated
- show being subtitled
"""
def __init__(self, action_id, show):
generic_queue.QueueItem.__init__(self, ShowQueueActions.names[action_id], action_id)
self.show = show
def isInQueue(self):
return self in sickbeard.showQueueScheduler.action.queue + [
sickbeard.showQueueScheduler.action.currentItem] #@UndefinedVariable
def _getName(self):
return str(self.show.indexerid)
def _isLoading(self):
return False
show_name = property(_getName)
isLoading = property(_isLoading)
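# Illustrative note, not part of the original file: callers normally do not
# instantiate these items directly; they go through the ShowQueue methods
# above, e.g. sickbeard.showQueueScheduler.action.updateShow(show_obj), which
# enqueues a QueueItemUpdate (or QueueItemForceUpdate when force=True).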
class QueueItemAdd(ShowQueueItem):
def __init__(self, indexer, indexer_id, showDir, default_status, quality, flatten_folders, lang, subtitles, anime,
scene, paused, blacklist, whitelist, default_status_after):
self.indexer = indexer
self.indexer_id = indexer_id
self.showDir = showDir
self.default_status = default_status
self.quality = quality
self.flatten_folders = flatten_folders
self.lang = lang
self.subtitles = subtitles
self.anime = anime
self.scene = scene
self.paused = paused
self.blacklist = blacklist
self.whitelist = whitelist
self.default_status_after = default_status_after
self.show = None
# this will initialize self.show to None
ShowQueueItem.__init__(self, ShowQueueActions.ADD, self.show)
# Process add show in priority
self.priority = generic_queue.QueuePriorities.HIGH
def _getName(self):
"""
Returns the show name if there is a show object created, if not returns
the dir that the show is being added to.
"""
        if self.show is None:
return self.showDir
return self.show.name
show_name = property(_getName)
def _isLoading(self):
"""
Returns True if we've gotten far enough to have a show object, or False
if we still only know the folder name.
"""
        return self.show is None
isLoading = property(_isLoading)
def run(self):
ShowQueueItem.run(self)
logger.log(u"Starting to add show " + self.showDir)
# make sure the Indexer IDs are valid
try:
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
logger.log(u"" + str(sickbeard.indexerApi(self.indexer).name) + ": " + repr(lINDEXER_API_PARMS))
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
s = t[self.indexer_id]
# this usually only happens if they have an NFO in their show dir which gave us a Indexer ID that has no proper english version of the show
if getattr(s, 'seriesname', None) is None:
logger.log(u"Show in " + self.showDir + " has no name on " + str(
sickbeard.indexerApi(self.indexer).name) + ", probably the wrong language used to search with.",
logger.ERROR)
ui.notifications.error("Unable to add show",
"Show in " + self.showDir + " has no name on " + str(sickbeard.indexerApi(
self.indexer).name) + ", probably the wrong language. Delete .nfo and add manually in the correct language.")
self._finishEarly()
return
# if the show has no episodes/seasons
if not s:
logger.log(u"Show " + str(s['seriesname']) + " is on " + str(
sickbeard.indexerApi(self.indexer).name) + " but contains no season/episode data.", logger.ERROR)
ui.notifications.error("Unable to add show",
"Show " + str(s['seriesname']) + " is on " + str(sickbeard.indexerApi(
self.indexer).name) + " but contains no season/episode data.")
self._finishEarly()
return
except Exception, e:
logger.log(u"Show name with ID %s doesn't exist on %s anymore. If you are using trakt, it will be removed from your TRAKT watchlist. If you are adding manually, try removing the nfo and adding again" %
(self.indexer_id,sickbeard.indexerApi(self.indexer).name) , logger.ERROR)
ui.notifications.error("Unable to add show",
"Unable to look up the show in " + self.showDir + " on " + str(sickbeard.indexerApi(
self.indexer).name) + " using ID " + str(
self.indexer_id) + ", not using the NFO. Delete .nfo and try adding manually again.")
if sickbeard.USE_TRAKT:
trakt_id = sickbeard.indexerApi(self.indexer).config['trakt_id']
trakt_api = TraktAPI(sickbeard.SSL_VERIFY, sickbeard.TRAKT_TIMEOUT)
title = self.showDir.split("/")[-1]
data = {
'shows': [
{
'title': title,
'ids': {}
}
]
}
if trakt_id == 'tvdb_id':
data['shows'][0]['ids']['tvdb'] = self.indexer_id
else:
data['shows'][0]['ids']['tvrage'] = self.indexer_id
trakt_api.traktRequest("sync/watchlist/remove", data, method='POST')
self._finishEarly()
return
try:
newShow = TVShow(self.indexer, self.indexer_id, self.lang)
newShow.loadFromIndexer()
self.show = newShow
# set up initial values
self.show.location = self.showDir
            self.show.subtitles = self.subtitles if self.subtitles is not None else sickbeard.SUBTITLES_DEFAULT
            self.show.quality = self.quality if self.quality else sickbeard.QUALITY_DEFAULT
            self.show.flatten_folders = self.flatten_folders if self.flatten_folders is not None else sickbeard.FLATTEN_FOLDERS_DEFAULT
            self.show.anime = self.anime if self.anime is not None else sickbeard.ANIME_DEFAULT
            self.show.scene = self.scene if self.scene is not None else sickbeard.SCENE_DEFAULT
            self.show.paused = self.paused if self.paused is not None else False
            # set up default new/missing episode status (assign first, then log the value actually used)
            self.show.default_ep_status = self.default_status
            logger.log(u"Setting all episodes to the specified default status: " + str(self.show.default_ep_status))
if self.show.anime:
self.show.release_groups = BlackAndWhiteList(self.show.indexerid)
if self.blacklist:
self.show.release_groups.set_black_keywords(self.blacklist)
if self.whitelist:
self.show.release_groups.set_white_keywords(self.whitelist)
# be smartish about this
#if self.show.genre and "talk show" in self.show.genre.lower():
# self.show.air_by_date = 1
#if self.show.genre and "documentary" in self.show.genre.lower():
# self.show.air_by_date = 0
#if self.show.classification and "sports" in self.show.classification.lower():
# self.show.sports = 1
except sickbeard.indexer_exception, e:
logger.log(
u"Unable to add show due to an error with " + sickbeard.indexerApi(self.indexer).name + ": " + ex(e),
logger.ERROR)
if self.show:
ui.notifications.error(
"Unable to add " + str(self.show.name) + " due to an error with " + sickbeard.indexerApi(
self.indexer).name + "")
else:
ui.notifications.error(
"Unable to add show due to an error with " + sickbeard.indexerApi(self.indexer).name + "")
self._finishEarly()
return
except exceptions.MultipleShowObjectsException:
logger.log(u"The show in " + self.showDir + " is already in your show list, skipping", logger.WARNING)
ui.notifications.error('Show skipped', "The show in " + self.showDir + " is already in your show list")
self._finishEarly()
return
except Exception, e:
logger.log(u"Error trying to add show: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
self._finishEarly()
raise
logger.log(u"Retrieving show info from IMDb", logger.DEBUG)
try:
self.show.loadIMDbInfo()
except imdb_exceptions.IMDbError, e:
logger.log(u" Something wrong on IMDb api: " + ex(e), logger.WARNING)
except Exception, e:
logger.log(u"Error loading IMDb info: " + ex(e), logger.ERROR)
try:
self.show.saveToDB()
except Exception, e:
logger.log(u"Error saving the show to the database: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
self._finishEarly()
raise
# add it to the show list
sickbeard.showList.append(self.show)
try:
self.show.loadEpisodesFromIndexer()
except Exception, e:
logger.log(
u"Error with " + sickbeard.indexerApi(self.show.indexer).name + ", not creating episode list: " + ex(e),
logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
# update internal name cache
name_cache.buildNameCache()
try:
self.show.loadEpisodesFromDir()
except Exception, e:
logger.log(u"Error searching dir for episodes: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
# if they set default ep status to WANTED then run the backlog to search for episodes
# FIXME: This needs to be a backlog queue item!!!
if self.show.default_ep_status == WANTED:
logger.log(u"Launching backlog for this show since its episodes are WANTED")
sickbeard.backlogSearchScheduler.action.searchBacklog([self.show])
self.show.writeMetadata()
self.show.updateMetadata()
self.show.populateCache()
self.show.flushEpisodes()
if sickbeard.USE_TRAKT:
# if there are specific episodes that need to be added by trakt
sickbeard.traktCheckerScheduler.action.manageNewShow(self.show)
# add show to trakt.tv library
if sickbeard.TRAKT_SYNC:
sickbeard.traktCheckerScheduler.action.addShowToTraktLibrary(self.show)
if sickbeard.TRAKT_SYNC_WATCHLIST:
logger.log(u"update watchlist")
notifiers.trakt_notifier.update_watchlist(show_obj=self.show)
# Load XEM data to DB for show
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer, force=True)
# check if show has XEM mapping so we can determin if searches should go by scene numbering or indexer numbering.
if not self.scene and sickbeard.scene_numbering.get_xem_numbering_for_show(self.show.indexerid,
self.show.indexer):
self.show.scene = 1
# After initial add, set to default_status_after.
self.show.default_ep_status = self.default_status_after
self.finish()
def _finishEarly(self):
        if self.show is not None:
sickbeard.showQueueScheduler.action.removeShow(self.show)
self.finish()
class QueueItemRefresh(ShowQueueItem):
def __init__(self, show=None, force=False):
ShowQueueItem.__init__(self, ShowQueueActions.REFRESH, show)
# do refreshes first because they're quick
self.priority = generic_queue.QueuePriorities.NORMAL
# force refresh certain items
self.force = force
def run(self):
ShowQueueItem.run(self)
logger.log(u"Performing refresh on " + self.show.name)
self.show.refreshDir()
self.show.writeMetadata()
#if self.force:
# self.show.updateMetadata()
self.show.populateCache()
# Load XEM data to DB for show
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
self.finish()
class QueueItemRename(ShowQueueItem):
def __init__(self, show=None):
ShowQueueItem.__init__(self, ShowQueueActions.RENAME, show)
def run(self):
ShowQueueItem.run(self)
logger.log(u"Performing rename on " + self.show.name)
try:
show_loc = self.show.location
except exceptions.ShowDirNotFoundException:
logger.log(u"Can't perform rename on " + self.show.name + " when the show dir is missing.", logger.WARNING)
return
ep_obj_rename_list = []
ep_obj_list = self.show.getAllEpisodes(has_location=True)
for cur_ep_obj in ep_obj_list:
# Only want to rename if we have a location
if cur_ep_obj.location:
if cur_ep_obj.relatedEps:
# do we have one of multi-episodes in the rename list already
have_already = False
for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]:
if cur_related_ep in ep_obj_rename_list:
have_already = True
break
if not have_already:
ep_obj_rename_list.append(cur_ep_obj)
else:
ep_obj_rename_list.append(cur_ep_obj)
for cur_ep_obj in ep_obj_rename_list:
cur_ep_obj.rename()
self.finish()
class QueueItemSubtitle(ShowQueueItem):
def __init__(self, show=None):
ShowQueueItem.__init__(self, ShowQueueActions.SUBTITLE, show)
def run(self):
ShowQueueItem.run(self)
logger.log(u"Downloading subtitles for " + self.show.name)
self.show.downloadSubtitles()
self.finish()
class QueueItemUpdate(ShowQueueItem):
def __init__(self, show=None):
ShowQueueItem.__init__(self, ShowQueueActions.UPDATE, show)
self.force = False
def run(self):
ShowQueueItem.run(self)
logger.log(u"Beginning update of " + self.show.name, logger.DEBUG)
logger.log(u"Retrieving show info from " + sickbeard.indexerApi(self.show.indexer).name + "", logger.DEBUG)
try:
self.show.loadFromIndexer(cache=not self.force)
except sickbeard.indexer_error, e:
logger.log(u"Unable to contact " + sickbeard.indexerApi(self.show.indexer).name + ", aborting: " + ex(e),
logger.WARNING)
return
except sickbeard.indexer_attributenotfound, e:
logger.log(u"Data retrieved from " + sickbeard.indexerApi(
self.show.indexer).name + " was incomplete, aborting: " + ex(e), logger.ERROR)
return
logger.log(u"Retrieving show info from IMDb", logger.DEBUG)
try:
self.show.loadIMDbInfo()
except imdb_exceptions.IMDbError, e:
logger.log(u" Something wrong on IMDb api: " + ex(e), logger.WARNING)
except Exception, e:
logger.log(u"Error loading IMDb info: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
# have to save show before reading episodes from db
try:
self.show.saveToDB()
except Exception, e:
logger.log(u"Error saving show info to the database: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
# get episode list from DB
logger.log(u"Loading all episodes from the database", logger.DEBUG)
DBEpList = self.show.loadEpisodesFromDB()
# get episode list from TVDB
logger.log(u"Loading all episodes from " + sickbeard.indexerApi(self.show.indexer).name + "", logger.DEBUG)
try:
IndexerEpList = self.show.loadEpisodesFromIndexer(cache=not self.force)
except sickbeard.indexer_exception, e:
logger.log(u"Unable to get info from " + sickbeard.indexerApi(
self.show.indexer).name + ", the show info will not be refreshed: " + ex(e), logger.ERROR)
IndexerEpList = None
if IndexerEpList is None:
logger.log(u"No data returned from " + sickbeard.indexerApi(
self.show.indexer).name + ", unable to update this show", logger.ERROR)
else:
# for each ep we found on the Indexer delete it from the DB list
for curSeason in IndexerEpList:
for curEpisode in IndexerEpList[curSeason]:
curEp = self.show.getEpisode(curSeason, curEpisode)
curEp.saveToDB()
if curSeason in DBEpList and curEpisode in DBEpList[curSeason]:
del DBEpList[curSeason][curEpisode]
# remaining episodes in the DB list are not on the indexer, just delete them from the DB
for curSeason in DBEpList:
for curEpisode in DBEpList[curSeason]:
logger.log(u"Permanently deleting episode " + str(curSeason) + "x" + str(
curEpisode) + " from the database", logger.INFO)
curEp = self.show.getEpisode(curSeason, curEpisode)
try:
curEp.deleteEpisode()
except exceptions.EpisodeDeletedException:
pass
# save show again, in case episodes have changed
try:
self.show.saveToDB()
except Exception, e:
logger.log(u"Error saving show info to the database: " + ex(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
logger.log(u"Finished update of " + self.show.name, logger.DEBUG)
sickbeard.showQueueScheduler.action.refreshShow(self.show, self.force)
self.finish()
class QueueItemForceUpdate(QueueItemUpdate):
def __init__(self, show=None):
ShowQueueItem.__init__(self, ShowQueueActions.FORCEUPDATE, show)
self.force = True
class QueueItemRemove(ShowQueueItem):
def __init__(self, show=None, full=False):
ShowQueueItem.__init__(self, ShowQueueActions.REMOVE, show)
# lets make sure this happens before any other high priority actions
self.priority = generic_queue.QueuePriorities.HIGH + generic_queue.QueuePriorities.HIGH
self.full = full
def run(self):
ShowQueueItem.run(self)
logger.log(u"Removing %s" % self.show.name)
self.show.deleteShow(full=self.full)
if sickbeard.USE_TRAKT:
try:
sickbeard.traktCheckerScheduler.action.removeShowFromTraktLibrary(self.show)
except Exception as e:
logger.log(u"Unable to delete show from Trakt: %s. Error: %s" % (self.show.name, ex(e)),logger.WARNING)
self.finish()
| gpl-3.0 | -8,123,828,560,539,287,000 | 38.36391 | 213 | 0.611147 | false |
inteos/IBAdmin | utils/Collect/Client.py | 1 | 3095 | # -*- coding: UTF-8 -*-
#
# Copyright (c) 2015-2019 by Inteos Sp. z o.o.
# All rights reserved. See LICENSE file for details.
#
from __future__ import print_function
import psycopg2
import psycopg2.extras
import lib
PARAMS = {}
CLIENTS = []
STAT = {}
NR = 0
def setparam(cur, param, types, descr, unit, chart, display, color, box):
parid = lib.getsetparam(cur, param, types, descr, unit, chart, display, color, box)
PARAMS[param] = parid
lib.setstat(cur, parid)
def clientlist(conn, fg):
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
curparam = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute("select R.name as name from config_confcomponent C, config_confrtype T, config_confresource R where C.type='D' and C.compid=R.compid and T.typeid=R.type and T.name='Client';")
for client in cur:
name = client['name']
if name not in CLIENTS:
CLIENTS.append(name)
setparam(curparam, "bacula.client."+name+".status", 'N', "Status of bacula-fd agent service at "+name, "Status",
1, 6, '#001F3F', 'box-primary')
global NR
NR = cur.rowcount
param = PARAMS["bacula.client.number"]
lib.update_stat_n(cur, param, NR)
if fg > 1:
print(PARAMS)
print(CLIENTS)
cur.close()
curparam.close()
def checkclient(name):
out = lib.bconsolecommand('.status client=\"'+name+'\" header')
for line in out:
if line.startswith('version='):
STAT[name] = 0
return 1
STAT[name] = NR * 2
return 0
def init(conn, fg):
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# chart: 1 - lines, 2 - bars, 3 - area
# display:
# 1 - autoscale, integer, min zero
# 2 - percent display
# 3 - autoscale, integer
# 4 - autoscale, decimal point
# 5 - autoscale, decimal point, min zero
# 6 - binary status display [online/offline]
# unit, chart, display, color, box
setparam(cur, "bacula.client.number", 'N', "Number of bacula-fd clients", 'Number', 1, 1, '#3c8dbc', 'box-info')
if fg > 1:
print (PARAMS)
cur.close()
def collect(conn, fg):
clientlist(conn, fg)
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
for client in CLIENTS:
if STAT.get(client, 0) == 0:
# last check was ok or never checked
if fg > 1:
print ("cheking ", client)
out = checkclient(client)
param = PARAMS["bacula.client."+client+".status"]
lib.update_stat_n(cur, param, out)
if out == 0:
if fg > 1:
print ("Timeout in checking client "+client+" !")
                # TODO: work out how to avoid interrupting the check loop for clients that are still running
# CLIENTS.append(CLIENTS.pop(CLIENTS.index(client)))
break
else:
STAT[client] -= 1
if fg > 1:
print (STAT)
cur.close()
| agpl-3.0 | 6,708,109,047,721,927,000 | 31.505263 | 191 | 0.584845 | false |
DanielSBrown/osf.io | website/project/licenses/__init__.py | 10 | 3353 | import functools
import json
import os
import warnings
from modularodm import fields, Q
from modularodm.exceptions import NoResultsFound
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
from website import settings
def _serialize(fields, instance):
return {
field: getattr(instance, field)
for field in fields
}
serialize_node_license = functools.partial(_serialize, ('id', 'name', 'text'))
def serialize_node_license_record(node_license_record):
if node_license_record is None:
return {}
ret = serialize_node_license(node_license_record.node_license)
ret.update(_serialize(('year', 'copyright_holders'), node_license_record))
return ret
@mongo_utils.unique_on(['id', '_id'])
class NodeLicense(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
id = fields.StringField(required=True, unique=True, editable=False)
name = fields.StringField(required=True, unique=True)
text = fields.StringField(required=True)
properties = fields.StringField(list=True)
class NodeLicenseRecord(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
node_license = fields.ForeignField('nodelicense', required=True)
# Deliberately left as a StringField to support year ranges (e.g. 2012-2015)
year = fields.StringField()
copyright_holders = fields.StringField(list=True)
@property
def name(self):
return self.node_license.name if self.node_license else None
@property
def text(self):
return self.node_license.text if self.node_license else None
@property
def id(self):
return self.node_license.id if self.node_license else None
def to_json(self):
return serialize_node_license_record(self)
def copy(self):
copied = NodeLicenseRecord(
node_license=self.node_license,
year=self.year,
copyright_holders=self.copyright_holders
)
copied.save()
return copied
def ensure_licenses(warn=True):
with open(
os.path.join(
settings.APP_PATH,
'node_modules', 'list-of-licenses', 'dist', 'list-of-licenses.json'
)
) as fp:
licenses = json.loads(fp.read())
for id, info in licenses.items():
name = info['name']
text = info['text']
properties = info.get('properties', [])
node_license = None
try:
node_license = NodeLicense.find_one(
Q('id', 'eq', id)
)
except NoResultsFound:
if warn:
warnings.warn(
'License {name} ({id}) not already in the database. Adding it now.'.format(
name=name,
id=id
)
)
node_license = NodeLicense(
id=id,
name=name,
text=text,
properties=properties
)
else:
node_license.name = name
node_license.text = text
node_license.properties = properties
node_license.save()
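# Illustrative note, not part of the original module: ensure_licenses() is
# typically run once at application start-up or from a migration so that the
# stored NodeLicense documents stay in sync with list-of-licenses.json; the
# exact call site is an assumption here.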
| apache-2.0 | -6,147,909,438,561,658,000 | 28.156522 | 99 | 0.581569 | false |
petewarden/tensorflow | tensorflow/compiler/tests/bucketize_op_test.py | 14 | 3065 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for bucketize_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BucketizationOpTest(xla_test.XLATestCase):
def testInt(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
self.assertAllEqual(expected_out,
sess.run(op, {p: [-5, 0, 2, 3, 5, 8, 10, 11, 12]}))
def testFloat(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0., 3., 8., 11.])
expected_out = [0, 1, 1, 2, 2, 3, 3, 4, 4]
self.assertAllEqual(
expected_out,
sess.run(op, {p: [-5., 0., 2., 3., 5., 8., 10., 11., 12.]}))
def test2DInput(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.float32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 3, 8, 11])
expected_out = [[0, 1, 1, 2, 2], [3, 3, 4, 4, 1]]
self.assertAllEqual(
expected_out, sess.run(op,
{p: [[-5, 0, 2, 3, 5], [8, 10, 11, 12, 0]]}))
@test_util.disable_mlir_bridge("Error handling")
def testInvalidBoundariesOrder(self):
with self.session() as sess:
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
op = math_ops._bucketize(p, boundaries=[0, 8, 3, 11])
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"Expected sorted boundaries"):
sess.run(op, {p: [-5, 0]})
def testBoundariesNotList(self):
with self.session():
with self.assertRaisesRegex(TypeError, "Expected list.*"):
p = array_ops.placeholder(dtypes.int32)
with self.test_scope():
math_ops._bucketize(p, boundaries=0)
if __name__ == "__main__":
test.main()
| apache-2.0 | 8,985,892,302,065,821,000 | 37.3125 | 80 | 0.62447 | false |
ISIFoundation/influenzanet-website | apps/accounts/forms.py | 1 | 3653 | from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.conf import settings
from django.contrib.auth.models import User
from registration.forms import RegistrationForm
from apps.reminder.models import UserReminderInfo
attrs_dict = {'class': 'required'}
class UnicodeRegistrationForm(RegistrationForm):
username = forms.RegexField(regex=r'(?u)^[\w.@+-]+$',
max_length=30,
widget=forms.TextInput(attrs=attrs_dict),
label=_("Username"),
error_messages={'invalid': _("This value must contain only letters, numbers and underscores.")})
class UnicodeUserChangeForm(UserChangeForm):
username = forms.RegexField(label=_("Username"), max_length=30, regex=r'(?u)^[\w.@+-]+$',
help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
class UnicodeUserCreationForm(UserCreationForm):
username = forms.RegexField(label=_("Username"), max_length=30, regex=r'(?u)^[\w.@+-]+$',
help_text = _("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
error_messages = {'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")})
class EmailSettingsForm(forms.Form):
email = forms.EmailField(label=_("Email"))
send_reminders = forms.BooleanField(label=_("Send reminders"), help_text=_("Check this box if you wish to receive weekly reminders throughout the flu season"), required=False)
language = forms.ChoiceField(label=_("Language"), choices=settings.LANGUAGES)
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance')
        self.reminder_info, _created = UserReminderInfo.objects.get_or_create(user=self.instance, defaults={'active': True, 'last_reminder': self.instance.date_joined})
initial = kwargs.pop('initial', {})
initial['email'] = self.instance.email
initial['send_reminders'] = self.reminder_info.active
initial['language'] = self.reminder_info.language if self.reminder_info.language else settings.LANGUAGE_CODE
kwargs['initial'] = initial
super(EmailSettingsForm, self).__init__(*args, **kwargs)
if len(settings.LANGUAGES) == 1:
del self.fields['language']
def clean_email(self):
email = self.cleaned_data['email']
if User.objects.exclude(id=self.instance.id).filter(email=email).count():
raise forms.ValidationError(_("This email is already in use"))
return email
def save(self):
if self.instance.email == self.instance.username:
self.instance.username = self.cleaned_data['email'][:30]
self.instance.email = self.cleaned_data['email']
self.reminder_info.active = self.cleaned_data['send_reminders']
if 'language' in self.cleaned_data:
self.reminder_info.language = self.cleaned_data['language']
self.instance.save()
self.reminder_info.save()
class UsernameForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', )
def clean_username(self):
value = self.cleaned_data['username']
if User.objects.exclude(pk=self.instance.pk).filter(username=value).count():
raise forms.ValidationError(_("A user with this username already exists"))
return value
| agpl-3.0 | -828,040,422,963,893,800 | 43.54878 | 179 | 0.644128 | false |
breezjw/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Tool/g77.py | 61 | 2486 | """engine.SCons.Tool.g77
Tool-specific initialization for g77.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
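As an illustration only (the target and source names below are made up, not
part of SCons), a typical SConstruct would request the tool indirectly:

    env = Environment(tools=['default', 'g77'])
    env.Program('main', ['main.f'])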
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/g77.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_f77_to_env
compilers = ['g77', 'f77']
def generate(env):
"""Add Builders and construction variables for g77 to an Environment."""
add_all_to_env(env)
add_f77_to_env(env)
fcomp = env.Detect(compilers) or 'g77'
if env['PLATFORM'] in ['cygwin', 'win32']:
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS')
env['SHF77FLAGS'] = SCons.Util.CLVar('$F77FLAGS')
else:
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -fPIC')
env['SHF77FLAGS'] = SCons.Util.CLVar('$F77FLAGS -fPIC')
env['FORTRAN'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['F77'] = fcomp
env['SHF77'] = '$F77'
env['INCFORTRANPREFIX'] = "-I"
env['INCFORTRANSUFFIX'] = ""
env['INCF77PREFIX'] = "-I"
env['INCF77SUFFIX'] = ""
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 | 7,687,149,388,932,123,000 | 33.054795 | 95 | 0.71078 | false |
childresslab/MicrocavityExp1 | hardware/simple_data_acq.py | 2 | 2043 | # -*- coding: utf-8 -*-
"""
Simple data acquisition from serial port.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import visa
from core.module import Base, ConfigOption
from interface.simple_data_interface import SimpleDataInterface
class SimpleAcq(Base, SimpleDataInterface):
""" Read human readable numbers from serial port.
"""
_modclass = 'simple'
_modtype = 'hardware'
resource = ConfigOption('interface', 'ASRL1::INSTR', missing='warn')
baudrate = ConfigOption('baudrate', 115200, missing='warn')
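    # For illustration, a hypothetical entry in the qudi configuration file
    # wiring up this module could look roughly like the following (the module
    # path and the 'simple_data' key are assumptions, not taken from this
    # repository; 'interface' and 'baudrate' mirror the ConfigOptions above):
    #
    #     simple_data:
    #         module.Class: 'simple_data_acq.SimpleAcq'
    #         interface: 'ASRL1::INSTR'
    #         baudrate: 115200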
def on_activate(self):
""" Activate module.
"""
self.rm = visa.ResourceManager()
self.log.debug('Resources: {0}'.format(self.rm.list_resources()))
self.my_instrument = self.rm.open_resource(self.resource, baud_rate=self.baudrate)
def on_deactivate(self):
""" Deactivate module.
"""
self.my_instrument.close()
self.rm.close()
def getData(self):
""" Read one value from serial port.
        @return int: value read from the serial port
"""
try:
return int(self.my_instrument.read_raw().decode('utf-8').rstrip().split()[1])
except:
return 0
def getChannels(self):
""" Number of channels.
@return int: number of channels
"""
return 1
| gpl-3.0 | 5,258,174,774,416,298,000 | 30.430769 | 90 | 0.670093 | false |
duncanmmacleod/gwpy | gwpy/signal/spectral/_scipy.py | 3 | 5371 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""GWpy API to the scipy.signal FFT routines
"""
import numpy
import scipy.signal
from ...frequencyseries import FrequencySeries
from ._utils import scale_timeseries_unit
from . import _registry as fft_registry
__author__ = 'Duncan Macleod <[email protected]>'
# -- density scaling methods --------------------------------------------------
def _spectral_density(timeseries, segmentlength, noverlap=None, name=None,
sdfunc=scipy.signal.welch, **kwargs):
"""Calculate a generic spectral density of this `TimeSeries`
"""
# compute spectral density
freqs, psd_ = sdfunc(
timeseries.value,
noverlap=noverlap,
fs=timeseries.sample_rate.decompose().value,
nperseg=segmentlength,
**kwargs
)
# generate FrequencySeries and return
unit = scale_timeseries_unit(
timeseries.unit,
kwargs.get('scaling', 'density'),
)
return FrequencySeries(
psd_,
unit=unit,
frequencies=freqs,
name=(name or timeseries.name),
epoch=timeseries.epoch,
channel=timeseries.channel,
)
def welch(timeseries, segmentlength, **kwargs):
"""Calculate a PSD using Welch's method
"""
kwargs.setdefault('average', 'mean')
return _spectral_density(timeseries, segmentlength, **kwargs)
def bartlett(timeseries, segmentlength, **kwargs):
"""Calculate a PSD using Bartlett's method
"""
kwargs.pop('noverlap', None)
return _spectral_density(timeseries, segmentlength, noverlap=0, **kwargs)
def median(timeseries, segmentlength, **kwargs):
"""Calculate a PSD using Welch's method with a median average
"""
kwargs.setdefault('average', 'median')
return _spectral_density(timeseries, segmentlength, **kwargs)
# register
for func in (welch, bartlett, median):
fft_registry.register_method(func, name=func.__name__)
# DEPRECATED:
fft_registry.register_method(func, name='scipy-{}'.format(func.__name__))
# -- others -------------------------------------------------------------------
def rayleigh(timeseries, segmentlength, noverlap=0, window='hann'):
"""Calculate a Rayleigh statistic spectrum
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
input `TimeSeries` data.
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, passing `None` will
choose based on the window method, default: ``0``
window : `str`, `numpy.ndarray`, optional
window function to apply to ``timeseries`` prior to FFT,
see :func:`scipy.signal.get_window` for details on acceptable
formats
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
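    Examples
    --------
    A rough usage sketch (the input data below are random placeholders, not
    real detector output):

    >>> import numpy
    >>> from gwpy.timeseries import TimeSeries
    >>> data = TimeSeries(numpy.random.normal(size=4096), sample_rate=256)
    >>> spec = rayleigh(data, segmentlength=256)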
"""
stepsize = segmentlength - noverlap
if noverlap:
numsegs = 1 + int((timeseries.size - segmentlength) / float(noverlap))
else:
numsegs = int(timeseries.size // segmentlength)
tmpdata = numpy.ndarray((numsegs, int(segmentlength//2 + 1)))
for i in range(numsegs):
tmpdata[i, :] = welch(
timeseries[i*stepsize:i*stepsize+segmentlength],
segmentlength, window=window)
std = tmpdata.std(axis=0)
mean = tmpdata.mean(axis=0)
return FrequencySeries(std/mean, unit='', copy=False, f0=0,
epoch=timeseries.epoch,
df=timeseries.sample_rate.value/segmentlength,
channel=timeseries.channel,
name='Rayleigh spectrum of %s' % timeseries.name)
def csd(timeseries, other, segmentlength, noverlap=None, **kwargs):
"""Calculate the CSD of two `TimeSeries` using Welch's method
Parameters
----------
timeseries : `~gwpy.timeseries.TimeSeries`
time-series of data
other : `~gwpy.timeseries.TimeSeries`
time-series of data
segmentlength : `int`
number of samples in single average.
noverlap : `int`
number of samples to overlap between segments, defaults to 50%.
**kwargs
other keyword arguments are passed to :meth:`scipy.signal.csd`
Returns
-------
spectrum : `~gwpy.frequencyseries.FrequencySeries`
average power `FrequencySeries`
See also
--------
scipy.signal.csd
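    Examples
    --------
    A minimal sketch of indirect use through the `TimeSeries` API (the values
    below are placeholders):

    >>> from gwpy.timeseries import TimeSeries
    >>> a = TimeSeries([1, 2, 3, 4, 5, 6, 7, 8], sample_rate=4)
    >>> b = TimeSeries([8, 7, 6, 5, 4, 3, 2, 1], sample_rate=4)
    >>> spectrum = a.csd(b, fftlength=1)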
"""
# calculate CSD
kwargs.setdefault('y', other.value)
return _spectral_density(
timeseries, segmentlength, noverlap=noverlap,
name=str(timeseries.name)+'---'+str(other.name),
sdfunc=scipy.signal.csd, **kwargs)
| gpl-3.0 | 7,788,211,428,837,832,000 | 30.409357 | 79 | 0.63936 | false |
spoki0/AI3 | AI3/opencv/sources/modules/ts/misc/xls-report.py | 1 | 15757 | #!/usr/bin/env python
"""
This script can generate XLS reports from OpenCV tests' XML output files.
To use it, first, create a directory for each machine you ran tests on.
Each such directory will become a sheet in the report. Put each XML file
into the corresponding directory.
Then, create your configuration file(s). You can have a global configuration
file (specified with the -c option), and per-sheet configuration files, which
must be called sheet.conf and placed in the directory corresponding to the sheet.
The settings in the per-sheet configuration file will override those in the
global configuration file, if both are present.
A configuration file must consist of a Python dictionary. The following keys
will be recognized:
* 'comparisons': [{'from': string, 'to': string}]
List of configurations to compare performance between. For each item,
the sheet will have a column showing speedup from configuration named
'from' to configuration named "to".
* 'configuration_matchers': [{'properties': {string: object}, 'name': string}]
Instructions for matching test run property sets to configuration names.
For each found XML file:
1) All attributes of the root element starting with the prefix 'cv_' are
placed in a dictionary, with the cv_ prefix stripped and the cv_module_name
element deleted.
2) The first matcher for which the XML's file property set contains the same
keys with equal values as its 'properties' dictionary is searched for.
A missing property can be matched by using None as the value.
Corollary 1: you should place more specific matchers before less specific
ones.
Corollary 2: an empty 'properties' dictionary matches every property set.
3) If a matching matcher is found, its 'name' string is presumed to be the name
of the configuration the XML file corresponds to. A warning is printed if
two different property sets match to the same configuration name.
    4) If such a matcher isn't found, if --include-unmatched was specified, the
configuration name is assumed to be the relative path from the sheet's
       directory to the XML file's containing directory. If the XML file is
directly inside the sheet's directory, the configuration name is instead
a dump of all its properties. If --include-unmatched wasn't specified,
the XML file is ignored and a warning is printed.
* 'configurations': [string]
List of names for compile-time and runtime configurations of OpenCV.
Each item will correspond to a column of the sheet.
* 'module_colors': {string: string}
Mapping from module name to color name. In the sheet, cells containing module
names from this mapping will be colored with the corresponding color. You can
find the list of available colors here:
<http://www.simplistix.co.uk/presentations/python-excel.pdf>.
* 'sheet_name': string
Name for the sheet. If this parameter is missing, the name of sheet's directory
will be used.
* 'sheet_properties': [(string, string)]
List of arbitrary (key, value) pairs that somehow describe the sheet. Will be
dumped into the first row of the sheet in string form.
Note that all keys are optional, although to get useful results, you'll want to
specify at least 'configurations' and 'configuration_matchers'.
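For illustration only, a hypothetical sheet.conf combining these keys might look
like this (every configuration name, property and color below is made up):

    {
        'sheet_name': 'Linux x64',
        'configurations': ['baseline', 'tbb'],
        'configuration_matchers': [
            {'properties': {'parallel_framework': 'tbb'}, 'name': 'tbb'},
            {'properties': {}, 'name': 'baseline'},
        ],
        'comparisons': [{'from': 'baseline', 'to': 'tbb'}],
        'module_colors': {'core': 'gray25'},
    }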
Finally, run the script. Use the --help option for usage information.
"""
from __future__ import division
import ast
import errno
import fnmatch
import logging
import numbers
import os, os.path
import re
from argparse import ArgumentParser
from glob import glob
from itertools import ifilter
import xlwt
from testlog_parser import parseLogFile
re_image_size = re.compile(r'^ \d+ x \d+$', re.VERBOSE)
re_data_type = re.compile(r'^ (?: 8 | 16 | 32 | 64 ) [USF] C [1234] $', re.VERBOSE)
time_style = xlwt.easyxf(num_format_str='#0.00')
no_time_style = xlwt.easyxf('pattern: pattern solid, fore_color gray25')
speedup_style = time_style
good_speedup_style = xlwt.easyxf('font: color green', num_format_str='#0.00')
bad_speedup_style = xlwt.easyxf('font: color red', num_format_str='#0.00')
no_speedup_style = no_time_style
error_speedup_style = xlwt.easyxf('pattern: pattern solid, fore_color orange')
header_style = xlwt.easyxf('font: bold true; alignment: horizontal centre, vertical top, wrap True')
subheader_style = xlwt.easyxf('alignment: horizontal centre, vertical top')
class Collector(object):
def __init__(self, config_match_func, include_unmatched):
self.__config_cache = {}
self.config_match_func = config_match_func
self.include_unmatched = include_unmatched
self.tests = {}
self.extra_configurations = set()
# Format a sorted sequence of pairs as if it was a dictionary.
# We can't just use a dictionary instead, since we want to preserve the sorted order of the keys.
@staticmethod
def __format_config_cache_key(pairs, multiline=False):
return (
('{\n' if multiline else '{') +
(',\n' if multiline else ', ').join(
(' ' if multiline else '') + repr(k) + ': ' + repr(v) for (k, v) in pairs) +
('\n}\n' if multiline else '}')
)
def collect_from(self, xml_path, default_configuration):
run = parseLogFile(xml_path)
module = run.properties['module_name']
properties = run.properties.copy()
del properties['module_name']
props_key = tuple(sorted(properties.iteritems())) # dicts can't be keys
if props_key in self.__config_cache:
configuration = self.__config_cache[props_key]
else:
configuration = self.config_match_func(properties)
if configuration is None:
if self.include_unmatched:
if default_configuration is not None:
configuration = default_configuration
else:
configuration = Collector.__format_config_cache_key(props_key, multiline=True)
self.extra_configurations.add(configuration)
else:
logging.warning('failed to match properties to a configuration: %s',
Collector.__format_config_cache_key(props_key))
else:
same_config_props = [it[0] for it in self.__config_cache.iteritems() if it[1] == configuration]
if len(same_config_props) > 0:
logging.warning('property set %s matches the same configuration %r as property set %s',
Collector.__format_config_cache_key(props_key),
configuration,
Collector.__format_config_cache_key(same_config_props[0]))
self.__config_cache[props_key] = configuration
if configuration is None: return
module_tests = self.tests.setdefault(module, {})
for test in run.tests:
test_results = module_tests.setdefault((test.shortName(), test.param()), {})
new_result = test.get("gmean") if test.status == 'run' else test.status
test_results[configuration] = min(
test_results.get(configuration), new_result,
key=lambda r: (1, r) if isinstance(r, numbers.Number) else
(2,) if r is not None else
(3,)
) # prefer lower result; prefer numbers to errors and errors to nothing
def make_match_func(matchers):
def match_func(properties):
for matcher in matchers:
if all(properties.get(name) == value
for (name, value) in matcher['properties'].iteritems()):
return matcher['name']
return None
return match_func
def main():
arg_parser = ArgumentParser(description='Build an XLS performance report.')
arg_parser.add_argument('sheet_dirs', nargs='+', metavar='DIR', help='directory containing perf test logs')
arg_parser.add_argument('-o', '--output', metavar='XLS', default='report.xls', help='name of output file')
arg_parser.add_argument('-c', '--config', metavar='CONF', help='global configuration file')
arg_parser.add_argument('--include-unmatched', action='store_true',
help='include results from XML files that were not recognized by configuration matchers')
arg_parser.add_argument('--show-times-per-pixel', action='store_true',
help='for tests that have an image size parameter, show per-pixel time, as well as total time')
args = arg_parser.parse_args()
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
if args.config is not None:
with open(args.config) as global_conf_file:
global_conf = ast.literal_eval(global_conf_file.read())
else:
global_conf = {}
wb = xlwt.Workbook()
for sheet_path in args.sheet_dirs:
try:
with open(os.path.join(sheet_path, 'sheet.conf')) as sheet_conf_file:
sheet_conf = ast.literal_eval(sheet_conf_file.read())
except IOError as ioe:
if ioe.errno != errno.ENOENT: raise
sheet_conf = {}
logging.debug('no sheet.conf for %s', sheet_path)
sheet_conf = dict(global_conf.items() + sheet_conf.items())
config_names = sheet_conf.get('configurations', [])
config_matchers = sheet_conf.get('configuration_matchers', [])
collector = Collector(make_match_func(config_matchers), args.include_unmatched)
for root, _, filenames in os.walk(sheet_path):
logging.info('looking in %s', root)
for filename in fnmatch.filter(filenames, '*.xml'):
if os.path.normpath(sheet_path) == os.path.normpath(root):
default_conf = None
else:
default_conf = os.path.relpath(root, sheet_path)
collector.collect_from(os.path.join(root, filename), default_conf)
config_names.extend(sorted(collector.extra_configurations - set(config_names)))
sheet = wb.add_sheet(sheet_conf.get('sheet_name', os.path.basename(os.path.abspath(sheet_path))))
sheet_properties = sheet_conf.get('sheet_properties', [])
sheet.write(0, 0, 'Properties:')
sheet.write(0, 1,
'N/A' if len(sheet_properties) == 0 else
' '.join(str(k) + '=' + repr(v) for (k, v) in sheet_properties))
sheet.row(2).height = 800
sheet.panes_frozen = True
sheet.remove_splits = True
sheet_comparisons = sheet_conf.get('comparisons', [])
row = 2
col = 0
for (w, caption) in [
(2500, 'Module'),
(10000, 'Test'),
(2000, 'Image\nwidth'),
(2000, 'Image\nheight'),
(2000, 'Data\ntype'),
(7500, 'Other parameters')]:
sheet.col(col).width = w
if args.show_times_per_pixel:
sheet.write_merge(row, row + 1, col, col, caption, header_style)
else:
sheet.write(row, col, caption, header_style)
col += 1
for config_name in config_names:
if args.show_times_per_pixel:
sheet.col(col).width = 3000
sheet.col(col + 1).width = 3000
sheet.write_merge(row, row, col, col + 1, config_name, header_style)
sheet.write(row + 1, col, 'total, ms', subheader_style)
sheet.write(row + 1, col + 1, 'per pixel, ns', subheader_style)
col += 2
else:
sheet.col(col).width = 4000
sheet.write(row, col, config_name, header_style)
col += 1
col += 1 # blank column between configurations and comparisons
for comp in sheet_comparisons:
sheet.col(col).width = 4000
caption = comp['to'] + '\nvs\n' + comp['from']
if args.show_times_per_pixel:
sheet.write_merge(row, row + 1, col, col, caption, header_style)
else:
sheet.write(row, col, caption, header_style)
col += 1
row += 2 if args.show_times_per_pixel else 1
sheet.horz_split_pos = row
sheet.horz_split_first_visible = row
module_colors = sheet_conf.get('module_colors', {})
module_styles = {module: xlwt.easyxf('pattern: pattern solid, fore_color {}'.format(color))
for module, color in module_colors.iteritems()}
for module, tests in sorted(collector.tests.iteritems()):
for ((test, param), configs) in sorted(tests.iteritems()):
sheet.write(row, 0, module, module_styles.get(module, xlwt.Style.default_style))
sheet.write(row, 1, test)
param_list = param[1:-1].split(', ') if param.startswith('(') and param.endswith(')') else [param]
image_size = next(ifilter(re_image_size.match, param_list), None)
if image_size is not None:
(image_width, image_height) = map(int, image_size.split('x', 1))
sheet.write(row, 2, image_width)
sheet.write(row, 3, image_height)
del param_list[param_list.index(image_size)]
data_type = next(ifilter(re_data_type.match, param_list), None)
if data_type is not None:
sheet.write(row, 4, data_type)
del param_list[param_list.index(data_type)]
sheet.row(row).write(5, ' | '.join(param_list))
col = 6
for c in config_names:
if c in configs:
sheet.write(row, col, configs[c], time_style)
else:
sheet.write(row, col, None, no_time_style)
col += 1
if args.show_times_per_pixel:
sheet.write(row, col,
xlwt.Formula('{0} * 1000000 / ({1} * {2})'.format(
xlwt.Utils.rowcol_to_cell(row, col - 1),
xlwt.Utils.rowcol_to_cell(row, 2),
xlwt.Utils.rowcol_to_cell(row, 3)
)),
time_style
)
col += 1
col += 1 # blank column
for comp in sheet_comparisons:
cmp_from = configs.get(comp["from"])
cmp_to = configs.get(comp["to"])
if isinstance(cmp_from, numbers.Number) and isinstance(cmp_to, numbers.Number):
try:
speedup = cmp_from / cmp_to
sheet.write(row, col, speedup, good_speedup_style if speedup > 1.1 else
bad_speedup_style if speedup < 0.9 else
speedup_style)
except ArithmeticError as e:
sheet.write(row, col, None, error_speedup_style)
else:
sheet.write(row, col, None, no_speedup_style)
col += 1
row += 1
if row % 1000 == 0: sheet.flush_row_data()
wb.save(args.output)
if __name__ == '__main__':
main()
| mit | -4,614,468,257,560,552,000 | 41.471698 | 114 | 0.584566 | false |
zaheerm/gst-python | testsuite/test_buffer.py | 3 | 5921 | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# gst-python - Python bindings for GStreamer
# Copyright (C) 2002 David I. Lehn
# Copyright (C) 2004 Johan Dahlin
# Copyright (C) 2005 Edward Hervey
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import gc
from common import gobject, gst, unittest, TestCase
class BufferTest(TestCase):
def testBufferBuffer(self):
buf = gst.Buffer('test')
assert str(buffer(buf)) == 'test'
def testBufferStr(self):
buffer = gst.Buffer('test')
assert str(buffer) == 'test'
def testBufferAlloc(self):
bla = 'mooooooo'
buffer = gst.Buffer(bla + '12345')
gc.collect ()
assert str(buffer) == 'mooooooo12345'
def testBufferBadConstructor(self):
self.assertRaises(TypeError, gst.Buffer, 'test', 0)
def testBufferStrNull(self):
test_string = 't\0e\0s\0t\0'
buffer = gst.Buffer(test_string)
assert str(buffer) == test_string
def testBufferSize(self):
test_string = 'a little string'
buffer = gst.Buffer(test_string)
assert len(buffer) == len(test_string)
assert hasattr(buffer, 'size')
assert buffer.size == len(buffer)
def testBufferCreateSub(self):
s = ''
for i in range(64):
s += '%02d' % i
buffer = gst.Buffer(s)
self.assertEquals(len(buffer), 128)
sub = buffer.create_sub(16, 16)
self.assertEquals(sub.size, 16)
self.assertEquals(sub.data, buffer.data[16:32])
self.assertEquals(sub.offset, gst.CLOCK_TIME_NONE)
def testBufferMerge(self):
buffer1 = gst.Buffer('foo')
buffer2 = gst.Buffer('bar')
merged_buffer = buffer1.merge(buffer2)
assert str(merged_buffer) == 'foobar'
def testBufferJoin(self):
buffer1 = gst.Buffer('foo')
buffer2 = gst.Buffer('bar')
joined_buffer = buffer1.merge(buffer2)
assert str(joined_buffer) == 'foobar'
def testBufferSpan(self):
buffer1 = gst.Buffer('foo')
buffer2 = gst.Buffer('bar')
spaned_buffer = buffer1.span(0L, buffer2, 6L)
assert str(spaned_buffer) == 'foobar'
def testBufferCopyOnWrite(self):
s='test_vector'
buffer = gst.Buffer(s)
sub = buffer.create_sub(0, buffer.size)
self.assertEquals(sub.size, buffer.size)
out = sub.copy_on_write ()
self.assertEquals(out.size, sub.size)
assert str(out) == str(buffer)
out[5] = 'w'
assert str(out) == 'test_wector'
def testBufferFlagIsSet(self):
buffer = gst.Buffer()
# Off by default
assert not buffer.flag_is_set(gst.BUFFER_FLAG_READONLY)
# Try switching on and off
buffer.flag_set(gst.BUFFER_FLAG_READONLY)
assert buffer.flag_is_set(gst.BUFFER_FLAG_READONLY)
buffer.flag_unset(gst.BUFFER_FLAG_READONLY)
assert not buffer.flag_is_set(gst.BUFFER_FLAG_READONLY)
# Try switching on and off
buffer.flag_set(gst.BUFFER_FLAG_IN_CAPS)
assert buffer.flag_is_set(gst.BUFFER_FLAG_IN_CAPS)
buffer.flag_unset(gst.BUFFER_FLAG_IN_CAPS)
assert not buffer.flag_is_set(gst.BUFFER_FLAG_IN_CAPS)
def testAttrFlags(self):
buffer = gst.Buffer()
assert hasattr(buffer, "flags")
assert isinstance(buffer.flags, int)
def testAttrTimestamp(self):
buffer = gst.Buffer()
assert hasattr(buffer, "timestamp")
assert isinstance(buffer.timestamp, long)
assert buffer.timestamp == gst.CLOCK_TIME_NONE
buffer.timestamp = 0
assert buffer.timestamp == 0
buffer.timestamp = 2**64 - 1
assert buffer.timestamp == 2**64 - 1
def testAttrDuration(self):
buffer = gst.Buffer()
assert hasattr(buffer, "duration")
assert isinstance(buffer.duration, long)
assert buffer.duration == gst.CLOCK_TIME_NONE
buffer.duration = 0
assert buffer.duration == 0
buffer.duration = 2**64 - 1
assert buffer.duration == 2**64 - 1
def testAttrOffset(self):
buffer = gst.Buffer()
assert hasattr(buffer, "offset")
assert isinstance(buffer.offset, long)
assert buffer.offset == gst.CLOCK_TIME_NONE
buffer.offset = 0
assert buffer.offset == 0
buffer.offset = 2**64 - 1
assert buffer.offset == 2**64 - 1
def testAttrOffset_end(self):
buffer = gst.Buffer()
assert hasattr(buffer, "offset_end")
assert isinstance(buffer.offset_end, long)
assert buffer.offset_end == gst.CLOCK_TIME_NONE
buffer.offset_end = 0
assert buffer.offset_end == 0
buffer.offset_end = 2**64 - 1
assert buffer.offset_end == 2**64 - 1
def testBufferCaps(self):
buffer = gst.Buffer()
caps = gst.caps_from_string('foo/blah')
gst.info("before settings caps")
buffer.set_caps(caps)
gst.info("after settings caps")
c = buffer.get_caps()
gst.info("after getting caps")
self.assertEquals(caps, c)
if __name__ == "__main__":
unittest.main()
| lgpl-2.1 | -7,369,971,035,358,484,000 | 32.264045 | 74 | 0.619828 | false |
canfar/cadcstats | svc_plots/condor.py | 1 | 26090 | #!/Users/will/anaconda3/bin/python
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import scan
from elasticsearch_xpack import XPackClient
import requests
import pandas as pd
import numpy as np
import re
from ipaddress import IPv4Address as ipv4, AddressValueError
import time
from bokeh.plotting import figure, output_file, show, save
from bokeh.models import FuncTickFormatter, FixedTicker, NumeralTickFormatter, Div, Title, LinearAxis, Range1d
from bokeh.charts import Bar, Donut
from bokeh.layouts import gridplot, column, row
es = 'http://users:[email protected]:9200'
class Init():
def __init__(self, url = None, timeout = 120):
self.timeout = timeout
if not url:
self.url = es
else:
self.url = url
if not requests.get(self.url):
print("Connection incorrect!")
            sys.exit(1)
def connect(self):
return Elasticsearch(self.url, timeout = self.timeout)
# Number of Batch Jobs Restarts
def fig1(conn, idx):
query = {
"query" : {
"match_all" : {}
},
"aggs" : {
"numres_peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"res_ranges" : {
"range" : {
"field" : "NumJobStarts",
# ranges are [from, to)
"ranges" : [
{"to" : 1},
{"from" : 1, "to" : 2},
{"from" : 2, "to" : 6},
{"from" : 6}
]
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["numres_peryr"]["buckets"]:
yr = _["key_as_string"]
events = [__["doc_count"] for __ in _["res_ranges"]["buckets"]]
df = df.append(pd.DataFrame([events], columns = ["Never", "Once", "2-5", ">5"], index = [yr]))
p = figure(plot_width = 1200, toolbar_location = "above")
clr = ["blue", "purple", "orange", "green"]
x = np.array([_ for _ in range(len(df))])
for i, col in enumerate(df.columns):
p.vbar(x = x + i/5 - 0.3, top = np.sqrt(df[col]), bottom = 0, width = 0.15, legend = col, color = clr[i])
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
y = np.array([0.1, 0.5, 1, 2, 4])
p.yaxis[0].ticker = FixedTicker(ticks = np.sqrt(y * 1e6))
p.yaxis[0].formatter = FuncTickFormatter(code = """return (tick**2 / 1e6).toLocaleString("en-US", { minimumFractionDigits: 1 })""")
p.yaxis.axis_label = "Number of jobs (millions)"
return column(Div(text = "<h1>Batch Processing Job Restarts</h1>", width = 600), p)
# histogram of job duration vs machine duration
def fig2(conn, idx):
query = {
"query" : {
"bool" : {
"must" : [
{"term" : {"JobStatus.keyword" : "Completed"}}
]
}
},
"aggs" : {
"jobdur_ranges" : {
"range" : {
"field" : "JobDuration",
# ranges are [from, to)
"ranges" : [
{"to" : 10},
{"from" : 10, "to" : 60},
{"from" : 60, "to" : 600},
{"from" : 600, "to" : 3600},
{"from" : 3600, "to" : 18000},
{"from" : 18000, "to" : 36000},
{"from" : 36000, "to" : 180000},
{"from" : 180000, "to" : 252000},
{"from" : 252000}
]
}
},
"machdur_ranges" : {
"range" : {
"script" : {
"lang" : "painless",
"inline" : "doc['CompletionDate'].value - doc['QDate'].value"
},
# ranges are [from, to)
"ranges" : [
{"to" : 10},
{"from" : 10, "to" : 60},
{"from" : 60, "to" : 600},
{"from" : 600, "to" : 3600},
{"from" : 3600, "to" : 18000},
{"from" : 18000, "to" : 36000},
{"from" : 36000, "to" : 180000},
{"from" : 180000, "to" : 252000},
{"from" : 252000}
]
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
cols = ["<10s", "10s~1m", "1m~10m", "10m~1h", "1h~5h", "5h~10h", "10h~50h", "50h~70h", ">70h"]
for i in ["jobdur_ranges", "machdur_ranges"]:
df = df.append(pd.DataFrame([[_["doc_count"] for _ in res["aggregations"][i]["buckets"]]], columns = cols, index = [i]))
df = df.T
p = figure(plot_width = 1200, toolbar_location = "above")
clr = ["blue", "purple", "orange", "green"]
x = np.array([_ for _ in range(len(df))])
for i, col in enumerate(df.columns):
p.vbar(x = x + i/5 - 0.10, top = np.sqrt(df[col]), bottom = 0, width = 0.2, color = clr[i], legend = "Machine" if col == "jobdur_ranges" else "User")
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
y = np.array([0.1, 0.5, 1, 2, 4])
p.yaxis[0].ticker = FixedTicker(ticks = np.sqrt(y * 1e6))
p.yaxis[0].formatter = FuncTickFormatter(code = """return (tick**2 / 1e6).toLocaleString("en-US", { minimumFractionDigits: 1 })""")
p.yaxis.axis_label = "Number of jobs (millions)"
return column(Div(text = "<h1>Batch Processing Jobs: Machine and User Duration</h1>", width = 1200), p)
# Median of Machine and User Batch Job Duration
def fig3(conn, idx):
query = {
"query" : {
"bool" : {
"must" : [
{"term" : {"JobStatus.keyword" : "Completed"}}
]
}
},
"aggs" : {
"dur_peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"jobdur_outlier" : {
"percentiles" : {
"field" : "JobDuration"
}
},
"machdur_outlier" : {
"percentiles" : {
"script" : {
"lang" : "painless",
"inline" : "doc['CompletionDate'].value - doc['QDate'].value"
}
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["dur_peryr"]["buckets"]:
yr = _["key_as_string"]
machdur_med = _["machdur_outlier"]["values"]["50.0"]
jobdur_med = _["jobdur_outlier"]["values"]["50.0"]
df = df.append(pd.DataFrame([[jobdur_med / 60, machdur_med / 60]], columns = ["jobdur_med", "machdur_med"], index = [yr]))
p = figure(plot_width = 1200, toolbar_location = "above")
clr = ["blue", "purple", "orange", "green"]
x = np.array([_ for _ in range(len(df))])
for i, col in enumerate(df.columns):
p.vbar(x = x + i/5 - 0.10, top = np.sqrt(df[col]), bottom = 0, width = 0.2, color = clr[i], legend = "Machine" if col == "jobdur_med" else "User")
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
y = np.array([5, 30, 100, 400, 1600])
p.yaxis[0].ticker = FixedTicker(ticks = np.sqrt(y))
p.yaxis[0].formatter = FuncTickFormatter(code = """return (tick**2).toLocaleString("en-US", { minimumFractionDigits: 0 })""")
p.yaxis.axis_label = "Median of Duration (Mins)"
return column(Div(text = "<h1>Median of Machine and User Batch Job Duration</h1>", width = 1200), p)
# Histogram of User Job Duration / Machine Job Duration Ratio
def fig4(conn, idx):
query = {
"query" : {
"bool" : {
"must" : [
{"term" : {"JobStatus.keyword" : "Completed"}}
]
}
},
"aggs": {
"ratio" : {
"histogram" : {
"field" : "JobDuration",
"interval" : 0.001,
"script" : "_value / (doc['CompletionDate'].value - doc['QDate'].value)"
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["ratio"]["buckets"]:
df = df.append(pd.DataFrame([[_["doc_count"]]], columns = ["ratio"], index = ['{:.3f}'.format(_["key"])]))
p = figure(plot_width = 1200, toolbar_location = "above")
p.vbar(x = list(map(float, df.index.values)), top = df["ratio"], bottom = 0, width = 0.001)
p.xaxis[0].formatter = NumeralTickFormatter(format = "0.00%")
p.yaxis.axis_label = "Number of Events"
return column(Div(text = "<h1>Histogram of Machine Job Duration / User Job Duration Ratio</h1>", width = 1200), p)
# Number of Batch Processing Users
def fig5(conn, idx):
query = {
"query" : {
"match_all" : {}
},
"aggs" : {
"usr_peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"unique_users" : {
"cardinality" : {
"field": "Owner.keyword"
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["usr_peryr"]["buckets"]:
yr = _["key_as_string"]
val = _["unique_users"]["value"]
df = df.append(pd.DataFrame([[val]], columns = ["uniq_usr"], index = [yr]))
p = figure(plot_width = 1200, toolbar_location = "above")
x = [_ for _ in range(len(df))]
p.vbar(x = x, top = df["uniq_usr"], bottom = 0, width = 0.8)
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.yaxis[0].axis_label = "Number of Users"
p.xaxis[0].axis_label = "Year"
return column(Div(text = "<h1>Number of Batch Processing Users</h1>", width = 1200), p)
# Request Ram/Dsk vs VM Ram/Dsk per VM Flavor
def fig6(conn, idx):
query = {
"query" : {
"bool" : {
"must_not" : [
{ "term": { "VMInstanceType.keyword" : "c4.med"} },
{ "term": { "VMInstanceType.keyword" : "12345678-6341-470e-92b7-5142014e7c5e"}},
{ "term": { "VMInstanceType.keyword" : "5c1ed3eb-6341-470e-92b7-5142014e7c5e"}}
]
}
},
"aggs" : {
"grpby_vm" : {
"terms" : {
"field" : "VMInstanceType.keyword",
"size" : 100
},
"aggs" : {
"avg_dskreq" : {
"avg": {
"field" : "RequestDisk"
}
},
"avg_ramreq" : {
"avg": {
"field" : "RequestMemory"
}
},
"dskspec" : {
"avg": {
"field" : "VMSpec.DISK"
}
},
"ramspec" : {
"avg": {
"field" : "VMSpec.RAM"
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["grpby_vm"]["buckets"]:
vm = _["key"]
avg_dskreq = _["avg_dskreq"]["value"]
avg_ramreq = _["avg_ramreq"]["value"]
dskspec = _["dskspec"]["value"]
ramspec = _["ramspec"]["value"]
df = df.append(pd.DataFrame([[vm, avg_dskreq / 1024, avg_ramreq, dskspec, ramspec]], columns = ["vm", "avg_dskreq", "avg_ramreq", "dskspec", "ramspec"]))
VMAlias = {
"c16.med":"13efd2a1-2fd8-48c4-822f-ce9bdc0e0004",
"c2.med":"23090fc1-bdf7-433e-9804-a7ec3d11de08",
"p8-12gb":"2cb70964-721d-47ff-badb-b702898b6fc2",
"c4.hi":"5112ed51-d263-4cc7-8b0f-7ef4782f783c",
"c2.low":"6c1ed3eb-6341-470e-92b7-5142014e7c5e",
"c8.med":"72009191-d893-4a07-871c-7f6e50b4e110",
"c4.low":"8061864c-722b-4f79-83af-91c3a835bd48",
"p8-6gb":"848b71a2-ae6b-4fcf-bba4-b7b0fccff5cf",
"c8.low":"8953676d-def7-4290-b239-4a14311fbb69",
"c8.hi":"a55036b9-f40c-4781-a293-789647c063d7",
"c16.hi":"d816ae8b-ab7d-403d-ae5f-f457b775903d",
"p1-0.75gb-tobedeleted":"f9f6fbd7-a0af-4604-8911-041ea6cbbbe4"
}
df = df.replace({"vm": VMAlias})
df = df.set_index("vm")
df = df.groupby(df.index).mean().sort_values(by = "ramspec")
y = np.array([_ for _ in range(len(df))])
clr = ["purple", "blue", "green" , "orange"]
w = 0.4
p = figure(plot_width = 1200, toolbar_location = "above")
for i, c in enumerate(["ramspec", "avg_ramreq"]):
p.hbar(y = y - w * (i - 1 / 2) , right = df[c] / 1024, left = 0, height = w, color = clr[i], legend = "Requested Memory" if i == 1 else "VM Memory")
p.xaxis[0].axis_label = "GB"
d = dict(zip(y, df.index))
p.yaxis[0].ticker = FixedTicker(ticks = y)
p.yaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.yaxis[0].axis_label = "VM UUID"
p.legend.location = "bottom_right"
df = df.sort_values(by = "dskspec")
p2 = figure(plot_width = 1200, toolbar_location = "above")
for i, c in enumerate(["dskspec", "avg_dskreq"]):
p2.hbar(y = y - w * (i - 1 / 2) , right = df[c], left = 0, height = w, color = clr[i], legend = "Requested Disk Size" if i == 1 else "VM Disk Size")
p2.xaxis[0].axis_label = "GB"
d = dict(zip(y, df.index))
p2.yaxis[0].ticker = FixedTicker(ticks = y)
p2.yaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p2.yaxis[0].axis_label = "VM UUID"
p2.legend.location = "bottom_right"
return column(Div(text = "<h1>Average Memory Requested For Batch VMS</h1>", width = 1200), p, Div(text = "<h1>Average Disk Requested For Batch VMS</h1>", width = 1200), p2)
# Number of Jobs Completed, Disk Usage, Memory Usage per VM Ins per Year
def fig7(conn, idx):
query = {
"query" : {
"bool" : {
"must" : [
{ "term": { "JobStatus.keyword" : "Completed"} }
]
}
},
"aggs" : {
"peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"tot_ram" : {
"sum" : {
"field" : "MemoryUsage"
}
},
"tot_dsk" : {
"sum" : {
"field" : "DiskUsage"
}
},
"vm_ins" : {
"cardinality" : {
"field" : "VMInstanceName.keyword"
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
df = pd.DataFrame()
for _ in res["aggregations"]["peryr"]["buckets"]:
yr = _["key_as_string"]
num_jobs = _["doc_count"]
jobs_per_ins = _["doc_count"] / _["vm_ins"]["value"]
ram_per_ins = _["tot_ram"]["value"] / _["vm_ins"]["value"] / 1024
dsk_per_ins = _["tot_dsk"]["value"] / _["vm_ins"]["value"] / 1024
df = df.append(pd.DataFrame([[num_jobs, jobs_per_ins, ram_per_ins, dsk_per_ins]], columns = ["num_jobs", "jobs", "ram", "dsk"], index = [yr]))
plts = [Div(text = "<h1>Basic Stats</h1>", width = 1200)]
clr = ["blue", "purple", "orange", "green"]
ylabs = ["", "", "GB", "GB"]
ttl = ["Number of Jobs Completed", "Disk Usage", "Memory Usage"]
x = [_ for _ in range(len(df))]
for i in range(len(df.columns)):
p = figure(plot_width = 800, toolbar_location = "above")
p.vbar(x = x, top = df.ix[:,i], bottom = 0, width = 0.8, color = clr[i])
if i == 0:
p.title.text = "Number of Jobs per Year"
else:
p.title.text = "{} per VM Instance".format(ttl[i - 1])
p.yaxis[0].axis_label = ylabs[i]
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
plts.append(p)
return column(plts)
def fig8(conn, idx):
reses = []
for _ in [("lt", "RequestMemory"),("gte", "VMSpec.RAM")]:
query = {
"query" : {
"bool" : {
"must" : [
{ "term": { "JobStatus.keyword" : "Completed"} },
{ "range": { "@timestamp": { _[0]: "2015-01-01" }}}
]
}
},
"aggs" : {
"peryr" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "year",
"format" : "yyyy"
},
"aggs" : {
"med_reqmem" : {
"percentiles" : {
"field": "{}".format(_[1])
}
},
"med_ratio" : {
"percentiles" : {
"script" : {
"lang" : "painless",
"inline" : "doc['MemoryUsage'].value / doc['{}'].value".format(_[1])
}
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
reses.append(res)
df = pd.DataFrame()
for __ in reses:
for _ in __["aggregations"]["peryr"]["buckets"]:
yr = _["key_as_string"]
med_ratio = _["med_ratio"]["values"]["50.0"]
med_reqmem = _["med_reqmem"]["values"]["50.0"]
df = df.append(pd.DataFrame([[med_reqmem / 1024, med_ratio]], columns = ["med_mem", "med_ratio"], index = [yr]))
plts = []
clr = ["blue", "purple", "orange", "green"]
ylabs = ["GB", ""]
ttl = ["Requested Memory", "Memory Usage / Requested Memory Ratio"]
x = [_ for _ in range(len(df))]
for i in range(len(df.columns)):
p = figure(plot_width = 800, toolbar_location = "above", y_axis_type = "log")
if i == 1:
p.y_range = Range1d(0.001, 1.1)
p.vbar(x = x, top = df.ix[:,i], bottom = 0, width = 0.8, color = clr[i])
p.title.text = "Median of {} for Batch Jobs".format(ttl[i])
p.yaxis[0].axis_label = ylabs[i]
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
plts.append(p)
return column(plts)
def fig9(conn, idx):
reses = []
for _ in [("lt", "RequestMemory"),("gte", "VMSpec.RAM")]:
query = {
"query" : {
"bool" : {
"must" : [
{ "term": { "JobStatus.keyword" : "Completed"} },
{ "range": { "@timestamp": { _[0]: "2015-01-01" }}}
]
}
},
"aggs" : {
"per_proj" : {
"terms" : {
"field" : "Project.keyword",
"size" : 100
},
"aggs" : {
"memusg" : {
"avg" : {
"field": "MemoryUsage"
}
},
"reqmem" : {
"avg" : {
"field" : "{}".format(_[1])
}
}
}
}
}
}
res = conn.search(index = idx, body = query)
reses.append(res)
df = pd.DataFrame()
for __ in reses:
for _ in __["aggregations"]["per_proj"]["buckets"]:
proj = _["key"]
reqmem = _["reqmem"]["value"]
memusg = _["memusg"]["value"]
df = df.append(pd.DataFrame([[proj, reqmem / 1024, memusg / 1024]], columns = ["proj", "reqmem", "memusg"]))
df = df.groupby("proj").sum().sort_values("reqmem")
y = np.array([_ for _ in range(len(df))])
clr = ["purple", "orange"]
w = 0.4
p = figure(plot_width = 800, toolbar_location = "above")
for i, c in enumerate(["reqmem", "memusg"]):
p.hbar(y = y - w * (i - 1 / 2) , right = df[c], left = 0, height = w, color = clr[i], legend = "Requested Memory" if i == 0 else "Memory Usage")
p.xaxis[0].axis_label = "GB"
d = dict(zip(y, df.index))
p.yaxis[0].ticker = FixedTicker(ticks = y)
p.yaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) +
"""
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.yaxis[0].axis_label = "Projects"
p.legend.location = "bottom_right"
return column(Div(text = "<h1>Average Memory Usage & Requested Memory for Batch VMs</h1>", width = 1200), p)
def fig10(conn):
df = pd.DataFrame()
for idx in ["logs-tomcat", "logs-condor"]:
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "proc_ws" } },
{ "term" : { "method" : "post" } }
]
}
},
"aggs" : {
"permo" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM"
},
"aggs" : {
"unique_user" : {
"cardinality" : {
"field": "user.keyword"
}
}
}
}
}
}
if idx == "logs-condor":
query["query"] = { "match_all" : {} }
query["aggs"]["permo"]["aggs"]["unique_user"]["cardinality"]["field"] = "Owner.keyword"
res = conn.search(index = idx, body = query)
for _ in res["aggregations"]["permo"]["buckets"]:
df = df.append(pd.DataFrame([[_["unique_user"]["value"]]], columns = [idx], index = [_["key_as_string"]]))
df = df.groupby(df.index).sum().dropna()
df.columns = ["HTCondor", "Web Service"]
x = np.array([_ for _ in range(len(df))])
p = figure(plot_width = 800, toolbar_location = "above")
p.vbar(x = x - 0.2, top = df["HTCondor"], bottom = 0, width = 0.4, legend = "HTCondor", color = "purple")
p.vbar(x = x + 0.2, top = df["Web Service"], bottom = 0, width = 0.4, legend = "Web Service", color = "blue")
p.legend.location = "top_right"
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) + """
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.xaxis.major_label_orientation = np.pi / 4
p.title.text = "Users Submitting Jobs by Web Service and Directly by HTCondor"
return p
def fig11(conn):
df = pd.DataFrame()
for m in ["post", "get"]:
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "proc_ws" } },
{ "term" : { "method" : m } }
]
}
},
"aggs" : {
"permo" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM"
}
}
}
}
res = conn.search(index = "logs-tomcat", body = query)
for _ in res["aggregations"]["permo"]["buckets"]:
df = df.append(pd.DataFrame([[_["doc_count"]]], columns = [m], index = [_["key_as_string"]]))
df = df.groupby(df.index).sum().dropna()
df.columns = ["Job Submission", "Job Status"]
x = np.array([_ for _ in range(len(df))])
p = figure(plot_width = 800, toolbar_location = "above")
p.vbar(x = x - 0.2, top = df["Job Submission"], bottom = 0, width = 0.4, legend = "Job Submission", color = "purple")
p.vbar(x = x + 0.2, top = df["Job Status"], bottom = 0, width = 0.4, legend = "Job Status", color = "blue")
p.legend.location = "top_right"
d = dict(zip(x, df.index))
p.xaxis[0].ticker = FixedTicker(ticks = x)
p.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) + """
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p.xaxis.major_label_orientation = np.pi / 4
p.title.text = "Requests for Job Queue Status and Job Submission"
return p
def fig12(conn):
df = pd.DataFrame()
for m in ["post", "get"]:
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "proc_ws" } },
{ "term" : { "method" : m } }
]
}
},
"aggs" : {
"dur_hist" : {
"histogram" : {
"field" : "time",
"interval" : 100,
"extended_bounds" : {
"min" : 0,
"max" : 40000
}
}
}
}
}
res = conn.search(index = "logs-tomcat", body = query)
for _ in res["aggregations"]["dur_hist"]["buckets"]:
df = df.append(pd.DataFrame([[_["doc_count"]]], columns = [m], index = [_["key"]]))
df = df.groupby(df.index).sum().dropna()
df.columns = ["Job Submission", "Job Status"]
x = np.array([_ for _ in range(len(df))])
p = figure(plot_width = 1200, toolbar_location = "above", y_axis_type = "log")
p.vbar(x = x - 0.2, top = df["Job Submission"], bottom = 0, width = 0.4, legend = "Job Submission", color = "purple")
p.vbar(x = x + 0.2, top = df["Job Status"], bottom = 0, width = 0.4, legend = "Job Status", color = "blue")
p.legend.location = "top_right"
p.title.text = "Web Service Requests for Job Status and Job Submission"
return p
def fig13(conn):
df = pd.DataFrame()
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "term" : { "service" : "proc_ws" } },
{ "term" : { "method" : "post" } }
],
"must_not" : { "exists" : { "field" : "message" } }
}
},
"aggs" : {
"permo" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM"
}
}
}
}
res = conn.search(index = "logs-tomcat", body = query)
for _ in res["aggregations"]["permo"]["buckets"]:
df = df.append(pd.DataFrame([[_["doc_count"]]], columns = ["proc_ws"], index = [_["key_as_string"]]))
query = {
"size" : 0,
"query" : {
"bool" : {
"must" : [
{ "range" : { "ClusterId" : { "gte" : 0 } } },
{ "range" : { "ProcId" : { "gte" : 0 } } }
]
}
},
"aggs" : {
"permo" : {
"date_histogram" : {
"field" : "@timestamp",
"interval" : "month",
"format" : "yyyy-MM"
},
"aggs" : {
"uniq_clusterid" : {
"cardinality" : {
"field" : "ClusterId"
}
}
}
}
}
}
res = conn.search(index = "logs-condor", body = query)
for _ in res["aggregations"]["permo"]["buckets"]:
df = df.append(pd.DataFrame([[_["uniq_clusterid"]["value"]]], columns = ["condor"], index = [_["key_as_string"]]))
df = df.groupby(df.index).sum()
df = df[df.index < "2017-01"]
df["ratio"] = df["proc_ws"] / df["condor"]
df = df.dropna(how = "any")
p1 = figure(width = 800, title = "Average Ratio of Web Service submissions over HTCondor Submissions")
x = [_ for _ in range(len(df))]
p1.vbar(x = x, top = df["ratio"], bottom = 0, width = 0.8)
d = dict(zip(x, df.index))
p1.xaxis[0].ticker = FixedTicker(ticks = x)
p1.xaxis[0].formatter = FuncTickFormatter(code = """dic = """ + str(d) + """
if (tick in dic) {
return dic[tick]
}
else {
return ''
}""")
p1.yaxis[0].axis_label = "Ratio"
p1.xaxis.major_label_orientation = np.pi / 4
return p1
if __name__ == "__main__":
conn = Init(timeout = 300).connect()
    #fig1(conn, "logs-condor")
    #fig2(conn, "logs-condor")
    #fig3(conn, "logs-condor")
    #fig4(conn, "logs-condor")
    #fig5(conn, "logs-condor")
    #fig6(conn, "logs-condor")
    #fig7(conn, "logs-condor")
    #fig8(conn, "logs-condor")
    #fig9(conn, "logs-condor")
#fig10(conn)
#fig11(conn)
#fig12(conn)
fig13(conn)
#test()
| mit | 2,742,119,125,383,871,000 | 27.420479 | 174 | 0.524645 | false |
ramnes/qtile | libqtile/widget/spacer.py | 2 | 2695 | # Copyright (c) 2008, 2010 Aldo Cortesi
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2012 Tim Neumann
# Copyright (c) 2012 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile import bar
from libqtile.log_utils import logger
from libqtile.widget import base
class Spacer(base._Widget):
"""Just an empty space on the bar
Often used with length equal to bar.STRETCH to push bar widgets to the
right or bottom edge of the screen.
Parameters
==========
length :
Length of the widget. Can be either ``bar.STRETCH`` or a length in
pixels.
width :
DEPRECATED, same as ``length``.
"""
orientations = base.ORIENTATION_BOTH
defaults = [
("background", None, "Widget background color")
]
def __init__(self, length=bar.STRETCH, width=None, **config):
"""
"""
# 'width' was replaced by 'length' since the widget can be installed in
# vertical bars
if width is not None:
logger.warning('width kwarg or positional argument is '
'deprecated. Please use length.')
length = width
base._Widget.__init__(self, length, **config)
self.add_defaults(Spacer.defaults)
def draw(self):
if self.length > 0:
self.drawer.clear(self.background or self.bar.background)
if self.bar.horizontal:
self.drawer.draw(offsetx=self.offset, width=self.length)
else:
self.drawer.draw(offsety=self.offset, height=self.length)
| mit | -4,544,435,534,959,907,000 | 37.5 | 79 | 0.682004 | false |
desmovalvo/virtualsib-part2bis | test/sp2b/python/query.py | 1 | 9410 | #!/usr/bin/python
# requirements
import sys
import pygal
import getopt
import timeit
from SIBLib import *
# Queries
q1 = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX bench: <http://localhost/vocabulary/bench/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
SELECT ?yr
WHERE {
?journal rdf:type bench:Journal .
?journal dc:title "Journal 1 (1940)"^^xsd:string .
?journal dcterms:issued ?yr
}"""
q2 = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX swrc: <http://swrc.ontoware.org/ontology#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX bench: <http://localhost/vocabulary/bench/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX dcterms: <http://purl.org/dc/terms/>
SELECT ?inproc ?author ?booktitle ?title
?proc ?ee ?page ?url ?yr ?abstract
WHERE {
?inproc rdf:type bench:Inproceedings .
?inproc dc:creator ?author .
?inproc bench:booktitle ?booktitle .
?inproc dc:title ?title .
?inproc dcterms:partOf ?proc .
?inproc rdfs:seeAlso ?ee .
?inproc swrc:pages ?page .
?inproc foaf:homepage ?url .
?inproc dcterms:issued ?yr
OPTIONAL {
?inproc bench:abstract ?abstract
}
}
ORDER BY ?yr"""
q3a = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX bench: <http://localhost/vocabulary/bench/>
PREFIX swrc: <http://swrc.ontoware.org/ontology#>
SELECT ?article
WHERE {
?article rdf:type bench:Article .
?article ?property ?value
FILTER (?property=swrc:pages)
}"""
q3b = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX bench: <http://localhost/vocabulary/bench/>
PREFIX swrc: <http://swrc.ontoware.org/ontology#>
SELECT ?article
WHERE {
?article rdf:type bench:Article .
?article ?property ?value
FILTER (?property=swrc:month)
}"""
q3c = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX swrc: <http://swrc.ontoware.org/ontology#>
PREFIX bench: <http://localhost/vocabulary/bench/>
SELECT ?article
WHERE {
?article rdf:type bench:Article .
?article ?property ?value
FILTER (?property=swrc:isbn)
}"""
q4 = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX bench: <http://localhost/vocabulary/bench/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX dcterms: <http://purl.org/dc/terms/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX swrc: <http://swrc.ontoware.org/ontology#>
SELECT DISTINCT ?name1 ?name2
WHERE {
?article1 rdf:type bench:Article .
?article2 rdf:type bench:Article .
?article1 dc:creator ?author1 .
?author1 foaf:name ?name1 .
?article2 dc:creator ?author2 .
?author2 foaf:name ?name2 .
?article1 swrc:journal ?journal .
?article2 swrc:journal ?journal
FILTER (?name1<?name2)
}"""
q5a = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX bench: <http://localhost/vocabulary/bench/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
SELECT DISTINCT ?person ?name
WHERE {
?article rdf:type bench:Article .
?article dc:creator ?person .
?inproc rdf:type bench:Inproceedings .
?inproc dc:creator ?person2 .
?person foaf:name ?name .
?person2 foaf:name ?name2
FILTER (?name=?name2)
}"""
q5b = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX bench: <http://localhost/vocabulary/bench/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
SELECT DISTINCT ?person ?name
WHERE {
?article rdf:type bench:Article .
?article dc:creator ?person .
?inproc rdf:type bench:Inproceedings .
?inproc dc:creator ?person .
?person foaf:name ?name
}"""
q6 = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX dcterms: <http://purl.org/dc/terms/>
SELECT ?yr ?name ?document
WHERE {
?class rdfs:subClassOf foaf:Document .
?document rdf:type ?class .
?document dcterms:issued ?yr .
?document dc:creator ?author .
?author foaf:name ?name
OPTIONAL {
?class2 rdfs:subClassOf foaf:Document .
?document2 rdf:type ?class2 .
?document2 dcterms:issued ?yr2 .
?document2 dc:creator ?author2
FILTER (?author=?author2 && ?yr2<?yr)
} FILTER (!bound(?author2))
}"""
q7 = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
PREFIX dcterms: <http://purl.org/dc/terms/>
SELECT DISTINCT ?title
WHERE {
?class rdfs:subClassOf foaf:Document .
?doc rdf:type ?class .
?doc dc:title ?title .
?bag2 ?member2 ?doc .
?doc2 dcterms:references ?bag2
OPTIONAL {
?class3 rdfs:subClassOf foaf:Document .
?doc3 rdf:type ?class3 .
?doc3 dcterms:references ?bag3 .
?bag3 ?member3 ?doc
OPTIONAL {
?class4 rdfs:subClassOf foaf:Document .
?doc4 rdf:type ?class4 .
?doc4 dcterms:references ?bag4 .
?bag4 ?member4 ?doc3
} FILTER (!bound(?doc4))
} FILTER (!bound(?doc3))
}"""
q8 = """PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX dc: <http://purl.org/dc/elements/1.1/>
SELECT DISTINCT ?name
WHERE {
?erdoes rdf:type foaf:Person .
?erdoes foaf:name "Paul Erdoes"^^xsd:string .
{
?document dc:creator ?erdoes .
?document dc:creator ?author .
?document2 dc:creator ?author .
?document2 dc:creator ?author2 .
?author2 foaf:name ?name
FILTER (?author!=?erdoes &&
?document2!=?document &&
?author2!=?erdoes &&
?author2!=?author)
} UNION {
?document dc:creator ?erdoes.
?document dc:creator ?author.
?author foaf:name ?name
FILTER (?author!=?erdoes)
}
}"""
q9 = """PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT DISTINCT ?predicate
WHERE {
{
?person rdf:type foaf:Person .
?subject ?predicate ?person
} UNION {
?person rdf:type foaf:Person .
?person ?predicate ?object
}
}"""
q10 = """PREFIX person: <http://localhost/persons/>
SELECT ?subject ?predicate
WHERE {
?subject ?predicate person:Paul_Erdoes
}"""
q11 = """PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
SELECT ?ee
WHERE {
?publication rdfs:seeAlso ?ee
}
ORDER BY ?ee
LIMIT 10
OFFSET 50"""
short_queries = [q1, q3b, q8, q9, q10, q11]
long_queries = [q2, q3a, q4, q5a, q5b, q6]
queries = long_queries
# read the parameters
realsib_ip = realsib_port = vsib_ip = vsib_port = iterations = None
try:
    opts, args = getopt.getopt(sys.argv[1:], "r:v:i:", ["realsib=", "vsib=", "iterations="])
for opt, arg in opts:
if opt in ("-r", "--realsib"):
realsib_ip = arg.split(":")[0]
realsib_port = int(arg.split(":")[1])
elif opt in ("-v", "--vsib"):
vsib_ip = arg.split(":")[0]
vsib_port = int(arg.split(":")[1])
elif opt in ("-i", "--iterations"):
iterations = int(arg)
else:
print "Unrecognized option " + str(opt)
if not(realsib_ip and realsib_port and vsib_ip and vsib_port and iterations):
print 'Usage: python sp2b-query-test.py -r realsib_ip:port -v vsib_ip:port -i iterations'
sys.exit()
except getopt.GetoptError:
print 'Usage: python sp2b-query-test.py -r realsib_ip:port -v vsib_ip:port -i iterations'
sys.exit()
print "ready to begin"
################ Measures ##########
kp_list = []
# Connection to real sib
kp0 = SibLib(realsib_ip, realsib_port)
kp_list.append(kp0)
print "connected to real sib"
# Connection to virtual sib
kp1 = SibLib(vsib_ip, vsib_port)
kp_list.append(kp1)
print "connected to virtual sib"
global_results = []
for kp in kp_list:
kp_results = []
for q in queries:
print "query " + str(queries.index(q))
# calculate results for query q
iteration_results = []
for i in xrange(iterations):
print "* iteration " + str(i)
end = timeit.timeit(lambda: kp.execute_sparql_query(q), number=1)
iteration_results.append(end)
iteration_results.sort()
if len(iteration_results)%2 == 0:
median = (iteration_results[len(iteration_results)/2] + iteration_results[len(iteration_results)/2-1]) / 2
else:
median = iteration_results[len(iteration_results)/2]
kp_results.append(round(median * 1000, 2))
global_results.append(kp_results)
################ Measures ##########
# Creating the graph
print "Drawing the graph..."
bar_chart = pygal.Bar(human_readable=True, x_title='Query', y_title='Time (in milliseconds)', x_label_rotation=60)
bar_chart.title = 'Query execution times (median over %d iterations)' % iterations
# adding results
bar_chart.add('Real SIB', global_results[0])
bar_chart.add('Virtual SIB', global_results[1])
# saving the graph
filename_template = """sp2b-query-test_%siter.svg"""
filename = filename_template % (iterations)
bar_chart.render_to_file(filename)
| lgpl-3.0 | -6,130,149,182,655,621,000 | 27.953846 | 135 | 0.643464 | false |
etingof/pyasn1 | tests/type/test_useful.py | 2 | 4718 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2020, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pyasn1/license.html
#
import datetime
import pickle
import sys
from copy import deepcopy
import unittest
from tests.base import BaseTestCase
from pyasn1.type import useful
class FixedOffset(datetime.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return datetime.timedelta(0)
UTC = FixedOffset(0, 'UTC')
UTC2 = FixedOffset(120, 'UTC')
class ObjectDescriptorTestCase(BaseTestCase):
pass
class GeneralizedTimeTestCase(BaseTestCase):
def testFromDateTime(self):
assert useful.GeneralizedTime.fromDateTime(datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC)) == '20170711000102.3Z'
def testToDateTime0(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2) == useful.GeneralizedTime('20170711000102').asDateTime
def testToDateTime1(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC) == useful.GeneralizedTime('20170711000102Z').asDateTime
def testToDateTime2(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC) == useful.GeneralizedTime('20170711000102.3Z').asDateTime
def testToDateTime3(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC) == useful.GeneralizedTime('20170711000102,3Z').asDateTime
def testToDateTime4(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC) == useful.GeneralizedTime('20170711000102.3+0000').asDateTime
def testToDateTime5(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC2) == useful.GeneralizedTime('20170711000102.3+0200').asDateTime
def testToDateTime6(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2, 3000, tzinfo=UTC2) == useful.GeneralizedTime('20170711000102.3+02').asDateTime
def testToDateTime7(self):
assert datetime.datetime(2017, 7, 11, 0, 1) == useful.GeneralizedTime('201707110001').asDateTime
def testToDateTime8(self):
assert datetime.datetime(2017, 7, 11, 0) == useful.GeneralizedTime('2017071100').asDateTime
def testCopy(self):
dt = useful.GeneralizedTime("20170916234254+0130").asDateTime
assert dt == deepcopy(dt)
class GeneralizedTimePicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = useful.GeneralizedTime()
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == useful.GeneralizedTime
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = useful.GeneralizedTime("20170916234254+0130")
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1 == old_asn1
class UTCTimeTestCase(BaseTestCase):
def testFromDateTime(self):
assert useful.UTCTime.fromDateTime(datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC)) == '170711000102Z'
def testToDateTime0(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2) == useful.UTCTime('170711000102').asDateTime
def testToDateTime1(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC) == useful.UTCTime('170711000102Z').asDateTime
def testToDateTime2(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC) == useful.UTCTime('170711000102+0000').asDateTime
def testToDateTime3(self):
assert datetime.datetime(2017, 7, 11, 0, 1, 2, tzinfo=UTC2) == useful.UTCTime('170711000102+0200').asDateTime
def testToDateTime4(self):
assert datetime.datetime(2017, 7, 11, 0, 1) == useful.UTCTime('1707110001').asDateTime
class UTCTimePicklingTestCase(unittest.TestCase):
def testSchemaPickling(self):
old_asn1 = useful.UTCTime()
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert type(new_asn1) == useful.UTCTime
assert old_asn1.isSameTypeWith(new_asn1)
def testValuePickling(self):
old_asn1 = useful.UTCTime("170711000102")
serialised = pickle.dumps(old_asn1)
assert serialised
new_asn1 = pickle.loads(serialised)
assert new_asn1 == old_asn1
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| bsd-2-clause | -5,369,962,743,174,861,000 | 33.188406 | 135 | 0.688215 | false |
nathanielvarona/airflow | tests/providers/jenkins/operators/test_jenkins_job_trigger.py | 3 | 12070 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import Mock, patch
import jenkins
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.jenkins.hooks.jenkins import JenkinsHook
from airflow.providers.jenkins.operators.jenkins_job_trigger import JenkinsJobTriggerOperator
class TestJenkinsOperator(unittest.TestCase):
@parameterized.expand(
[
(
"dict params",
{'a_param': 'blip', 'another_param': '42'},
),
(
"string params",
'{"second_param": "beep", "third_param": "153"}',
),
(
"list params",
['final_one', 'bop', 'real_final', 'eggs'],
),
]
)
def test_execute(self, _, parameters):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_build_info.return_value = {
'result': 'SUCCESS',
'url': 'http://aaa.fake-url.com/congratulation/its-a-job',
}
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
jenkins_connection_id="fake_jenkins_connection",
# The hook is mocked, this connection won't be used
task_id="operator_test",
job_name="a_job_on_jenkins",
parameters=parameters,
sleep_time=1,
)
operator.execute(None)
assert jenkins_mock.get_build_info.call_count == 1
jenkins_mock.get_build_info.assert_called_once_with(name='a_job_on_jenkins', number='1')
@parameterized.expand(
[
(
"dict params",
{'a_param': 'blip', 'another_param': '42'},
),
(
"string params",
'{"second_param": "beep", "third_param": "153"}',
),
(
"list params",
['final_one', 'bop', 'real_final', 'eggs'],
),
]
)
def test_execute_job_polling_loop(self, _, parameters):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.side_effect = [
{'result': None},
{'result': 'SUCCESS', 'url': 'http://aaa.fake-url.com/congratulation/its-a-job'},
]
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection",
# The hook is mocked, this connection won't be used
parameters=parameters,
sleep_time=1,
)
operator.execute(None)
assert jenkins_mock.get_build_info.call_count == 2
@parameterized.expand(
[
(
"dict params",
{'a_param': 'blip', 'another_param': '42'},
),
(
"string params",
'{"second_param": "beep", "third_param": "153"}',
),
(
"list params",
['final_one', 'bop', 'real_final', 'eggs'],
),
]
)
def test_execute_job_failure(self, _, parameters):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.return_value = {
'result': 'FAILURE',
'url': 'http://aaa.fake-url.com/congratulation/its-a-job',
}
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
parameters=parameters,
jenkins_connection_id="fake_jenkins_connection",
# The hook is mocked, this connection won't be used
sleep_time=1,
)
with pytest.raises(AirflowException):
operator.execute(None)
@parameterized.expand(
[
(
'SUCCESS',
['SUCCESS', 'UNSTABLE'],
),
(
'UNSTABLE',
['SUCCESS', 'UNSTABLE'],
),
(
'UNSTABLE',
['UNSTABLE'],
),
(
'SUCCESS',
None,
),
]
)
def test_allowed_jenkins_states(self, state, allowed_jenkins_states):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.return_value = {
'result': state,
'url': 'http://aaa.fake-url.com/congratulation/its-a-job',
}
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection",
allowed_jenkins_states=allowed_jenkins_states,
# The hook is mocked, this connection won't be used
sleep_time=1,
)
try:
operator.execute(None)
except AirflowException:
pytest.fail(f'Job failed with state={state} while allowed states={allowed_jenkins_states}')
@parameterized.expand(
[
(
'FAILURE',
['SUCCESS', 'UNSTABLE'],
),
(
'UNSTABLE',
['SUCCESS'],
),
(
'SUCCESS',
['UNSTABLE'],
),
(
'FAILURE',
None,
),
(
'UNSTABLE',
None,
),
]
)
def test_allowed_jenkins_states_failure(self, state, allowed_jenkins_states):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret')
jenkins_mock.get_job_info.return_value = {'nextBuildNumber': '1'}
jenkins_mock.get_build_info.return_value = {
'result': state,
'url': 'http://aaa.fake-url.com/congratulation/its-a-job',
}
jenkins_mock.build_job_url.return_value = 'http://www.jenkins.url/somewhere/in/the/universe'
hook_mock = Mock(spec=JenkinsHook)
hook_mock.get_jenkins_server.return_value = jenkins_mock
with patch.object(JenkinsJobTriggerOperator, "get_hook") as get_hook_mocked, patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
mock_make_request.side_effect = [
{'body': '', 'headers': {'Location': 'http://what-a-strange.url/18'}},
{'body': '{"executable":{"number":"1"}}', 'headers': {}},
]
get_hook_mocked.return_value = hook_mock
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="operator_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection",
allowed_jenkins_states=allowed_jenkins_states,
# The hook is mocked, this connection won't be used
sleep_time=1,
)
with pytest.raises(AirflowException):
operator.execute(None)
def test_build_job_request_settings(self):
jenkins_mock = Mock(spec=jenkins.Jenkins, auth='secret', timeout=2)
jenkins_mock.build_job_url.return_value = 'http://apache.org'
with patch(
'airflow.providers.jenkins.operators.jenkins_job_trigger.jenkins_request_with_headers'
) as mock_make_request:
operator = JenkinsJobTriggerOperator(
dag=None,
task_id="build_job_test",
job_name="a_job_on_jenkins",
jenkins_connection_id="fake_jenkins_connection",
)
operator.build_job(jenkins_mock)
mock_request = mock_make_request.call_args_list[0][0][1]
assert mock_request.method == 'POST'
assert mock_request.url == 'http://apache.org'
| apache-2.0 | -7,008,159,877,831,536,000 | 38.061489 | 107 | 0.544656 | false |
sorenk/ansible | test/units/module_utils/urls/test_open_url.py | 12 | 9742 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import datetime
import os
from ansible.module_utils.urls import open_url, urllib_request, HAS_SSLCONTEXT, cookiejar, ConnectionError, RequestWithMethod
from ansible.module_utils.urls import SSLValidationHandler, HTTPSClientAuthHandler, RedirectHandlerFactory
import pytest
if HAS_SSLCONTEXT:
import ssl
@pytest.fixture
def urlopen_mock(mocker):
return mocker.patch('ansible.module_utils.urls.urllib_request.urlopen')
@pytest.fixture
def install_opener_mock(mocker):
return mocker.patch('ansible.module_utils.urls.urllib_request.install_opener')
def test_open_url(urlopen_mock, install_opener_mock):
r = open_url('https://ansible.com/')
args = urlopen_mock.call_args[0]
assert args[1] is None # data, this is handled in the Request not urlopen
assert args[2] == 10 # timeout
req = args[0]
assert req.headers == {}
assert req.data is None
assert req.get_method() == 'GET'
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
SSLValidationHandler,
RedirectHandlerFactory(), # factory, get handler
)
found_handlers = []
for handler in handlers:
if isinstance(handler, SSLValidationHandler) or handler.__class__.__name__ == 'RedirectHandler':
found_handlers.append(handler)
assert len(found_handlers) == 2
def test_open_url_http(urlopen_mock, install_opener_mock):
r = open_url('http://ansible.com/')
args = urlopen_mock.call_args[0]
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
found_handlers = []
for handler in handlers:
if isinstance(handler, SSLValidationHandler):
found_handlers.append(handler)
assert len(found_handlers) == 0
def test_open_url_ftp(urlopen_mock, install_opener_mock, mocker):
mocker.patch('ansible.module_utils.urls.ParseResultDottedDict.as_list', side_effect=AssertionError)
# Using ftp scheme should prevent the AssertionError side effect to fire
r = open_url('ftp://[email protected]/')
def test_open_url_headers(urlopen_mock, install_opener_mock):
r = open_url('http://ansible.com/', headers={'Foo': 'bar'})
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers == {'Foo': 'bar'}
def test_open_url_username(urlopen_mock, install_opener_mock):
r = open_url('http://ansible.com/', url_username='user')
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 2
assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user', None)}
def test_open_url_username_in_url(urlopen_mock, install_opener_mock):
r = open_url('http://[email protected]/')
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert found_handlers[0].passwd.passwd[None] == {(('ansible.com', '/'),): ('user2', '')}
def test_open_url_username_force_basic(urlopen_mock, install_opener_mock):
r = open_url('http://ansible.com/', url_username='user', url_password='passwd', force_basic_auth=True)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 0
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
def test_open_url_auth_in_netloc(urlopen_mock, install_opener_mock):
r = open_url('http://user:[email protected]/')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.get_full_url() == 'http://ansible.com/'
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
expected_handlers = (
urllib_request.HTTPBasicAuthHandler,
urllib_request.HTTPDigestAuthHandler,
)
found_handlers = []
for handler in handlers:
if isinstance(handler, expected_handlers):
found_handlers.append(handler)
assert len(found_handlers) == 2
def test_open_url_netrc(urlopen_mock, install_opener_mock, monkeypatch):
here = os.path.dirname(__file__)
monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc'))
r = open_url('http://ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Authorization') == b'Basic dXNlcjpwYXNzd2Q='
r = open_url('http://foo.ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert 'Authorization' not in req.headers
monkeypatch.setenv('NETRC', os.path.join(here, 'fixtures/netrc.nonexistant'))
r = open_url('http://ansible.com/')
args = urlopen_mock.call_args[0]
req = args[0]
assert 'Authorization' not in req.headers
def test_open_url_no_proxy(urlopen_mock, install_opener_mock, mocker):
build_opener_mock = mocker.patch('ansible.module_utils.urls.urllib_request.build_opener')
r = open_url('http://ansible.com/', use_proxy=False)
handlers = build_opener_mock.call_args[0]
found_handlers = []
for handler in handlers:
if isinstance(handler, urllib_request.ProxyHandler):
found_handlers.append(handler)
assert len(found_handlers) == 1
@pytest.mark.skipif(not HAS_SSLCONTEXT, reason="requires SSLContext")
def test_open_url_no_validate_certs(urlopen_mock, install_opener_mock):
r = open_url('https://ansible.com/', validate_certs=False)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
ssl_handler = None
for handler in handlers:
if isinstance(handler, HTTPSClientAuthHandler):
ssl_handler = handler
break
assert ssl_handler is not None
context = ssl_handler._context
assert context.protocol == ssl.PROTOCOL_SSLv23
assert context.options & ssl.OP_NO_SSLv2
assert context.options & ssl.OP_NO_SSLv3
assert context.verify_mode == ssl.CERT_NONE
assert context.check_hostname is False
def test_open_url_client_cert(urlopen_mock, install_opener_mock):
here = os.path.dirname(__file__)
client_cert = os.path.join(here, 'fixtures/client.pem')
client_key = os.path.join(here, 'fixtures/client.key')
r = open_url('https://ansible.com/', client_cert=client_cert, client_key=client_key)
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
ssl_handler = None
for handler in handlers:
if isinstance(handler, HTTPSClientAuthHandler):
ssl_handler = handler
break
assert ssl_handler is not None
assert ssl_handler.client_cert == client_cert
assert ssl_handler.client_key == client_key
https_connection = ssl_handler._build_https_connection('ansible.com')
assert https_connection.key_file == client_key
assert https_connection.cert_file == client_cert
def test_open_url_cookies(urlopen_mock, install_opener_mock):
r = open_url('https://ansible.com/', cookies=cookiejar.CookieJar())
opener = install_opener_mock.call_args[0][0]
handlers = opener.handlers
cookies_handler = None
for handler in handlers:
if isinstance(handler, urllib_request.HTTPCookieProcessor):
cookies_handler = handler
break
assert cookies_handler is not None
def test_open_url_invalid_method(urlopen_mock, install_opener_mock):
with pytest.raises(ConnectionError):
r = open_url('https://ansible.com/', method='BOGUS')
def test_open_url_custom_method(urlopen_mock, install_opener_mock):
r = open_url('https://ansible.com/', method='DELETE')
args = urlopen_mock.call_args[0]
req = args[0]
assert isinstance(req, RequestWithMethod)
def test_open_url_user_agent(urlopen_mock, install_opener_mock):
r = open_url('https://ansible.com/', http_agent='ansible-tests')
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('User-agent') == 'ansible-tests'
def test_open_url_force(urlopen_mock, install_opener_mock):
r = open_url('https://ansible.com/', force=True, last_mod_time=datetime.datetime.now())
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('Cache-control') == 'no-cache'
assert 'If-modified-since' not in req.headers
def test_open_url_last_mod(urlopen_mock, install_opener_mock):
now = datetime.datetime.now()
r = open_url('https://ansible.com/', last_mod_time=now)
args = urlopen_mock.call_args[0]
req = args[0]
assert req.headers.get('If-modified-since') == now.strftime('%a, %d %b %Y %H:%M:%S +0000')
def test_open_url_headers_not_dict(urlopen_mock, install_opener_mock):
with pytest.raises(ValueError):
r = open_url('https://ansible.com/', headers=['bob'])
| gpl-3.0 | 6,766,792,710,714,387,000 | 30.025478 | 125 | 0.675221 | false |
jenrik/guake | src/guake/about.py | 15 | 1602 | # -*- coding: utf-8; -*-
"""
Copyright (C) 2007-2012 Lincoln de Sousa <[email protected]>
Copyright (C) 2007 Gabriel Falcão <[email protected]>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA
"""
from __future__ import absolute_import
import gtk
from guake.common import _
from guake.common import gladefile
from guake.common import pixmapfile
from guake.globals import VERSION
from guake.simplegladeapp import SimpleGladeApp
class AboutDialog(SimpleGladeApp):
"""The About Guake dialog class
"""
def __init__(self):
super(AboutDialog, self).__init__(gladefile('about.glade'),
root='aboutdialog')
dialog = self.get_widget('aboutdialog')
# images
ipath = pixmapfile('guake-notification.png')
img = gtk.gdk.pixbuf_new_from_file(ipath)
dialog.set_property('logo', img)
dialog.set_name(_('Guake Terminal'))
dialog.set_version(VERSION)
| gpl-2.0 | -3,269,167,323,656,883,700 | 32.354167 | 67 | 0.715178 | false |
redhatrises/freeipa | ipaserver/install/plugins/update_nis.py | 4 | 3156 | #
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from ipalib.plugable import Registry
from ipalib import errors
from ipalib import Updater
from ipaplatform.paths import paths
from ipapython.dn import DN
from ipaserver.install import sysupgrade
from ipaserver.install.ldapupdate import LDAPUpdate
register = Registry()
@register()
class update_nis_configuration(Updater):
"""Update NIS configuration
NIS configuration can be updated only if NIS Server was configured via
ipa-nis-manage command.
"""
def __recover_from_missing_maps(self, ldap):
# https://fedorahosted.org/freeipa/ticket/5507
# if all following DNs are missing, but 'NIS Server' container exists
        # we are experiencing the bug and the maps should be fixed
if sysupgrade.get_upgrade_state('nis',
'done_recover_from_missing_maps'):
            # this recovery must be done only once; a user may have deleted
            # some maps and we do not want to restore them again
return
self.log.debug("Recovering from missing NIS maps bug")
suffix = "cn=NIS Server,cn=plugins,cn=config"
domain = self.api.env.domain
missing_dn_list = [
DN(nis_map.format(domain=domain, suffix=suffix)) for nis_map in [
"nis-domain={domain}+nis-map=passwd.byname,{suffix}",
"nis-domain={domain}+nis-map=passwd.byuid,{suffix}",
"nis-domain={domain}+nis-map=group.byname,{suffix}",
"nis-domain={domain}+nis-map=group.bygid,{suffix}",
"nis-domain={domain}+nis-map=netid.byname,{suffix}",
"nis-domain={domain}+nis-map=netgroup,{suffix}",
]
]
for dn in missing_dn_list:
try:
ldap.get_entry(dn, attrs_list=['cn'])
except errors.NotFound:
pass
else:
                # bug is not effective, at least one of the possibly missing
                # maps was detected
return
sysupgrade.set_upgrade_state('nis', 'done_recover_from_missing_maps',
True)
        # bug is effective, run the update to recreate the missing maps
ld = LDAPUpdate(sub_dict={}, ldapi=True)
ld.update([paths.NIS_ULDIF])
def execute(self, **options):
ldap = self.api.Backend.ldap2
dn = DN(('cn', 'NIS Server'), ('cn', 'plugins'), ('cn', 'config'))
try:
ldap.get_entry(dn, attrs_list=['cn'])
except errors.NotFound:
# NIS is not configured on system, do not execute update
self.log.debug("Skipping NIS update, NIS Server is not configured")
# container does not exist, bug #5507 is not effective
sysupgrade.set_upgrade_state(
'nis', 'done_recover_from_missing_maps', True)
else:
self.__recover_from_missing_maps(ldap)
self.log.debug("Executing NIS Server update")
ld = LDAPUpdate(sub_dict={}, ldapi=True)
ld.update([paths.NIS_UPDATE_ULDIF])
return False, ()
| gpl-3.0 | 3,364,193,865,065,096,700 | 35.697674 | 79 | 0.590938 | false |
tanglei528/horizon | openstack_dashboard/dashboards/project/networks/ports/tabs.py | 8 | 1493 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
class OverviewTab(tabs.Tab):
name = _("Overview")
slug = "overview"
template_name = "project/networks/ports/_detail_overview.html"
def get_context_data(self, request):
port_id = self.tab_group.kwargs['port_id']
try:
port = api.neutron.port_get(self.request, port_id)
except Exception:
redirect = reverse('horizon:project:networks:index')
msg = _('Unable to retrieve port details.')
exceptions.handle(request, msg, redirect=redirect)
return {'port': port}
class PortDetailTabs(tabs.TabGroup):
slug = "port_details"
tabs = (OverviewTab,)
| apache-2.0 | 6,224,038,831,314,055,000 | 32.931818 | 78 | 0.697254 | false |
TintypeMolly/Yuzuki | resource/reply.py | 2 | 3281 | # -*- coding: utf-8 -*-
import json
from exception import BadRequest, Unauthorized
from helper.model_control import get_article, get_reply_page, get_reply,\
delete_reply, edit_reply, create_reply
from helper.permission import is_anybody, can_comment, is_author_or_admin,\
is_author
from helper.resource import YuzukiResource, need_anybody_permission
from helper.slack import post_messages_to_subscribers
class ReplyParent(YuzukiResource):
isLeaf = False
def __init__(self):
YuzukiResource.__init__(self)
self.putChild("view", ReplyView())
self.putChild("write", ReplyWrite())
self.putChild("delete", ReplyDelete())
self.putChild("edit", ReplyEdit())
class ReplyView(YuzukiResource):
def render_GET(self, request):
article_id = request.get_argument("article_id")
page = request.get_argument_int("page", 1)
article = get_article(request, article_id)
if article.board.name == "notice" or (is_anybody(request)):
replies = get_reply_page(request, article, page)
return json.dumps([reply.to_dict() for reply in replies])
else:
raise Unauthorized()
class ReplyWrite(YuzukiResource):
@need_anybody_permission
def render_POST(self, request):
article_id = request.get_argument("article_id")
article = get_article(request, article_id)
if not can_comment(request, article.board):
raise Unauthorized()
content = request.get_argument("content")
# no empty reply
if content.strip():
reply = create_reply(request, article, content)
request.dbsession.add(reply)
request.dbsession.commit()
page = request.get_argument("page", None)
redirect = "/article/view?id=%s" % article.uid
if page:
redirect += "&page=%s" % page
post_messages_to_subscribers(request, article.subscribing_users,
u"구독하고 있는 글에 새 댓글이 등록되었습니다.",
reply.user, article.subject,
content, redirect)
request.redirect(redirect)
return "success"
else:
raise BadRequest()
class ReplyDelete(YuzukiResource):
@need_anybody_permission
def render_DELETE(self, request):
reply_id = request.get_argument("id")
reply = get_reply(request, reply_id)
if is_author_or_admin(request, reply):
delete_reply(request, reply)
request.dbsession.commit()
return "success"
else:
raise Unauthorized()
class ReplyEdit(YuzukiResource):
@need_anybody_permission
def render_POST(self, request):
reply_id = request.get_argument("id")
reply = get_reply(request, reply_id)
if is_author(request, reply):
content = request.get_argument("content")
if content.strip():
edit_reply(request, reply, content)
request.dbsession.commit()
return "reply edit success"
else:
raise BadRequest()
else:
raise Unauthorized()
| mit | -9,149,046,863,757,112,000 | 35.033333 | 76 | 0.595128 | false |
daltonmaag/robofab | Scripts/RoboFabIntro/intro_FoundrySettings.py | 9 | 2906 | #FLM: RoboFab Intro, FoundrySettings.plist
#
#
# demo FoundrySettings.plist
#
#
# Setting all the data strings in the FontLab font header can be a repetitive and
# tedious exercise. RoboFab to the rescue! RoboFab features some nifty tools
# that help automate this process. These tools read a .plist file that you are free
# to edit to include your own standard settings. Currently, the .plist file contains
# these bits of data. We reserve the right to add more in the future.
# -copyright
# -trademark
# -license
# -licenseurl
# -notice
# -ttvendor
# -vendorurl
# -designer
# -designerurl
#
# The foundry settings tools parse this .plist file into a python dictionary that
# can be used to apply the data to fonts. It's really easy to use. Let's check it out!
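# For orientation, readFoundrySettings() hands back a plain dictionary keyed by
# the entries listed above; the values below are made-up examples only:
#
#   {'copyright': 'Copyright (c) 2008 My Foundry',
#    'trademark': 'MyFamily is a trademark of My Foundry',
#    'designer': 'A. Designer',
#    ...}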
from robofab.world import CurrentFont
from robofab.tools.toolsFL import makeDataFolder
# all the foundry settings tools live here:
from robofab.tools.toolsAll import readFoundrySettings, getFoundrySetting, setFoundrySetting
import time
import os
# You will need a font open in fontlab for this demo
font = CurrentFont()
# We need to know where the .plist file lives. In the FontLab environment
# it can live in the "RoboFab Data" folder with its friends. makeDataFolder()
# will make the data folder if it doesn't exist and it will return the path
settingsPath = os.path.join(makeDataFolder(), 'FoundrySettings.plist')
# Now, let's load those settings up
# readFoundrySettings(path) will return the data from the .plist as dictionary
mySettings = readFoundrySettings(settingsPath)
# Let's get the current year so that the year string is always up to date
font.info.year = time.gmtime(time.time())[0]
# Apply those settings that we just loaded
font.info.copyright = mySettings['copyright']
font.info.trademark = mySettings['trademark']
font.info.openTypeNameLicense = mySettings['license']
font.info.openTypeNameLicenseURL = mySettings['licenseurl']
font.info.openTypeNameDescription = mySettings['notice']
font.info.openTypeOS2VendorID = mySettings['ttvendor']
font.info.openTypeNameManufacturerURL = mySettings['vendorurl']
font.info.openTypeNameDesigner = mySettings['designer']
font.info.openTypeNameDesignerURL = mySettings['designerurl']
# and call the update method
font.update()
# But, Prof. RoboFab, what if I want to change the settings in the .plist file?
# Good question. You can always edit the .plist data by hand, or you can
# do it via a script. It would go a little something like this:
setFoundrySetting('trademark', 'This font is a trademark of Me, Myself and I', settingsPath)
# If you are on OSX, and you have the Apple developer tools installed, you
# can also edit it with /Developer/Applications/Property List Editor.
# And to read only one setting from the file you can use this handy little method.
font.info.trademark = getFoundrySetting('trademark', settingsPath)
font.update()
# It's that easy!
| bsd-3-clause | -2,668,712,273,434,922,000 | 37.746667 | 92 | 0.775981 | false |
krkhan/azure-linux-extensions | VMEncryption/main/BackupLogger.py | 8 | 1482 | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import traceback
import urlparse
import httplib
import os
import string
class BackupLogger(object):
    """Logs extension messages via the handler utility and mirrors them to the serial console."""
    def __init__(self, hutil):
        self.hutil = hutil
        self.current_process_id = os.getpid()
def log(self, msg, level='Info'):
log_msg = "{0}: [{1}] {2}".format(self.current_process_id, level, msg)
log_msg = filter(lambda c: c in string.printable, log_msg)
log_msg = log_msg.encode('ascii', 'ignore')
self.hutil.log(log_msg)
self.log_to_console(log_msg)
def log_to_console(self, msg):
try:
with open('/dev/console', 'w') as f:
msg = filter(lambda c: c in string.printable, msg)
f.write('[AzureDiskEncryption] ' + msg + '\n')
except IOError as e:
pass
| apache-2.0 | 2,927,821,668,167,324,700 | 31.173913 | 78 | 0.659459 | false |
tst-ahernandez/earthenterprise | earth_enterprise/src/google/protobuf-py/google/protobuf/internal/type_checkers.py | 9 | 12112 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
  FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
    corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def GetTypeChecker(cpp_type, field_type):
"""Returns a type checker for a message field of the specified types.
Args:
cpp_type: C++ type of the field (see descriptor.py).
field_type: Protocol message field type (see descriptor.py).
Returns:
An instance of TypeChecker which can be used to verify the types
of values assigned to a field of the specified type.
"""
if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field_type == _FieldDescriptor.TYPE_STRING):
return UnicodeValueChecker()
return _VALUE_CHECKERS[cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):
"""Type checker used to catch type errors as early as possible
when the client is setting scalar fields in protocol messages.
"""
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (int, long)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int, long)))
raise TypeError(message)
if not self._MIN <= proposed_value <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
class UnicodeValueChecker(object):
"""Checker used for string fields."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (str, unicode)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (str, unicode)))
raise TypeError(message)
# If the value is of type 'str' make sure that it is in 7-bit ASCII
# encoding.
if isinstance(proposed_value, str):
try:
unicode(proposed_value, 'ascii')
except UnicodeDecodeError:
raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII '
'encoding. Non-ASCII strings must be converted to '
'unicode objects before being added.' %
(proposed_value))
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
# efficient.
_MIN = -2147483648
_MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
# Type-checkers for all scalar CPPTYPEs.
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
_FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
}
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
| apache-2.0 | 4,025,457,730,975,738,000 | 41.498246 | 80 | 0.748844 | false |
ProstoMaxim/incubator-airflow | airflow/ti_deps/deps/valid_state_dep.py | 44 | 2218 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.exceptions import AirflowException
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
class ValidStateDep(BaseTIDep):
    """
    Ensures that the task instance's state is in a given set of valid states.
    :param valid_states: A list of valid states that a task instance can have to meet
        this dependency.
    :type valid_states: set(str)
    :return: whether or not the task instance's state is valid
    """
    NAME = "Task Instance State"
    IGNOREABLE = True
def __init__(self, valid_states):
super(ValidStateDep, self).__init__()
if not valid_states:
raise AirflowException(
'ValidStatesDep received an empty set of valid states.')
self._valid_states = valid_states
def __eq__(self, other):
return type(self) == type(other) and self._valid_states == other._valid_states
def __hash__(self):
return hash((type(self), tuple(self._valid_states)))
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if dep_context.ignore_ti_state:
yield self._passing_status(
reason="Context specified that state should be ignored.")
return
if ti.state in self._valid_states:
yield self._passing_status(reason="Task state {} was valid.".format(ti.state))
return
yield self._failing_status(
reason="Task is in the '{0}' state which is not a valid state for "
"execution. The task must be cleared in order to be run.".format(
ti.state))
| apache-2.0 | -4,903,598,234,940,261,000 | 36.59322 | 90 | 0.658702 | false |
mattvonrocketstein/smash | smashlib/ipy3x/core/inputsplitter.py | 1 | 24256 | """Input handling and transformation machinery.
The first class in this module, :class:`InputSplitter`, is designed to tell when
input from a line-oriented frontend is complete and should be executed, and when
the user should be prompted for another line of code instead. The name 'input
splitter' is largely for historical reasons.
A companion, :class:`IPythonInputSplitter`, provides the same functionality but
with full support for the extended IPython syntax (magics, system calls, etc).
The code to actually do these transformations is in :mod:`IPython.core.inputtransformer`.
:class:`IPythonInputSplitter` feeds the raw code to the transformers in order
and stores the results.
For more details, see the class docstrings below.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import ast
import codeop
import re
import sys
import warnings
from IPython.utils.py3compat import cast_unicode
from IPython.core.inputtransformer import (leading_indent,
classic_prompt,
ipy_prompt,
strip_encoding_cookie,
cellmagic,
assemble_logical_lines,
help_end,
escaped_commands,
assign_from_magic,
assign_from_system,
assemble_python_lines,
)
# These are available in this module for backwards compatibility.
from IPython.core.inputtransformer import (ESC_SHELL, ESC_SH_CAP, ESC_HELP,
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN, ESC_SEQUENCES)
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
# FIXME: These are general-purpose utilities that later can be moved to the
# general ward. Kept here for now because we're being very strict about test
# coverage with this code, and this lets us ensure that we keep 100% coverage
# while developing.
# compiled regexps for autoindent management
dedent_re = re.compile('|'.join([
r'^\s+raise(\s.*)?$', # raise statement (+ space + other stuff, maybe)
r'^\s+raise\([^\)]*\).*$', # wacky raise with immediate open paren
r'^\s+return(\s.*)?$', # normal return (+ space + other stuff, maybe)
r'^\s+return\([^\)]*\).*$', # wacky return with immediate open paren
r'^\s+pass\s*$', # pass (optionally followed by trailing spaces)
r'^\s+break\s*$', # break (optionally followed by trailing spaces)
r'^\s+continue\s*$', # continue (optionally followed by trailing spaces)
]))
ini_spaces_re = re.compile(r'^([ \t\r\f\v]+)')
# regexp to match pure comment lines so we don't accidentally insert 'if 1:'
# before pure comments
comment_line_re = re.compile('^\s*\#')
def num_ini_spaces(s):
"""Return the number of initial spaces in a string.
Note that tabs are counted as a single space. For now, we do *not* support
mixing of tabs and spaces in the user's input.
Parameters
----------
s : string
Returns
-------
n : int
"""
ini_spaces = ini_spaces_re.match(s)
if ini_spaces:
return ini_spaces.end()
else:
return 0
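# Illustrative examples (not part of the original source):
#
#   num_ini_spaces('    x = 1')   # -> 4
#   num_ini_spaces('\tx = 1')     # -> 1, a tab counts as a single space
#   num_ini_spaces('x = 1')       # -> 0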
def last_blank(src):
"""Determine if the input source ends in a blank.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src:
return False
ll = src.splitlines()[-1]
return (ll == '') or ll.isspace()
last_two_blanks_re = re.compile(r'\n\s*\n\s*$', re.MULTILINE)
last_two_blanks_re2 = re.compile(r'.+\n\s*\n\s+$', re.MULTILINE)
def last_two_blanks(src):
"""Determine if the input source ends in two blanks.
A blank is either a newline or a line consisting of whitespace.
Parameters
----------
src : string
A single or multiline string.
"""
if not src:
return False
# The logic here is tricky: I couldn't get a regexp to work and pass all
# the tests, so I took a different approach: split the source by lines,
# grab the last two and prepend '###\n' as a stand-in for whatever was in
# the body before the last two lines. Then, with that structure, it's
# possible to analyze with two regexps. Not the most elegant solution, but
# it works. If anyone tries to change this logic, make sure to validate
# the whole test suite first!
new_src = '\n'.join(['###\n'] + src.splitlines()[-2:])
return (bool(last_two_blanks_re.match(new_src)) or
bool(last_two_blanks_re2.match(new_src)))
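# Illustrative examples (not part of the original source):
#
#   last_two_blanks('x = 1\n')        # -> False, no trailing blank lines
#   last_two_blanks('x = 1\n\n\n')    # -> True, two blank lines at the end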
def remove_comments(src):
"""Remove all comments from input source.
Note: comments are NOT recognized inside of strings!
Parameters
----------
src : string
A single or multiline input string.
Returns
-------
String with all Python comments removed.
"""
return re.sub('#.*', '', src)
def get_input_encoding():
"""Return the default standard input encoding.
If sys.stdin has no encoding, 'ascii' is returned."""
# There are strange environments for which sys.stdin.encoding is None. We
# ensure that a valid encoding is returned.
encoding = getattr(sys.stdin, 'encoding', None)
if encoding is None:
encoding = 'ascii'
return encoding
#-----------------------------------------------------------------------------
# Classes and functions for normal Python syntax handling
#-----------------------------------------------------------------------------
class InputSplitter(object):
r"""An object that can accumulate lines of Python source before execution.
This object is designed to be fed python source line-by-line, using
:meth:`push`. It will return on each push whether the currently pushed
code could be executed already. In addition, it provides a method called
:meth:`push_accepts_more` that can be used to query whether more input
can be pushed into a single interactive block.
This is a simple example of how an interactive terminal-based client can use
this tool::
isp = InputSplitter()
while isp.push_accepts_more():
indent = ' '*isp.indent_spaces
prompt = '>>> ' + indent
line = indent + raw_input(prompt)
isp.push(line)
print 'Input source was:\n', isp.source_reset(),
"""
# Number of spaces of indentation computed from input that has been pushed
# so far. This is the attributes callers should query to get the current
# indentation level, in order to provide auto-indent facilities.
indent_spaces = 0
# String, indicating the default input encoding. It is computed by default
# at initialization time via get_input_encoding(), but it can be reset by a
# client with specific knowledge of the encoding.
encoding = ''
# String where the current full source input is stored, properly encoded.
# Reading this attribute is the normal way of querying the currently pushed
# source code, that has been properly encoded.
source = ''
# Code object corresponding to the current source. It is automatically
# synced to the source, so it can be queried at any time to obtain the code
# object; it will be None if the source doesn't compile to valid Python.
code = None
# Private attributes
# List with lines of input accumulated so far
_buffer = None
# Command compiler
_compile = None
# Mark when input has changed indentation all the way back to flush-left
_full_dedent = False
# Boolean indicating whether the current block is complete
_is_complete = None
# Boolean indicating whether the current block has an unrecoverable syntax
# error
_is_invalid = False
def __init__(self):
"""Create a new InputSplitter instance.
"""
self._buffer = []
self._compile = codeop.CommandCompiler()
self.encoding = get_input_encoding()
def reset(self):
"""Reset the input buffer and associated state."""
self.indent_spaces = 0
self._buffer[:] = []
self.source = ''
self.code = None
self._is_complete = False
self._is_invalid = False
self._full_dedent = False
def source_reset(self):
"""Return the input source and perform a full reset.
"""
out = self.source
self.reset()
return out
def check_complete(self, source):
"""Return whether a block of code is ready to execute, or should be continued
This is a non-stateful API, and will reset the state of this InputSplitter.
Parameters
----------
source : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent_spaces : int or None
The number of spaces by which to indent the next line of code. If
status is not 'incomplete', this is None.
"""
self.reset()
try:
self.push(source)
except SyntaxError:
# Transformers in IPythonInputSplitter can raise SyntaxError,
# which push() will not catch.
return 'invalid', None
else:
if self._is_invalid:
return 'invalid', None
elif self.push_accepts_more():
return 'incomplete', self.indent_spaces
else:
return 'complete', None
finally:
self.reset()
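    # A small usage sketch (illustrative only):
    #
    #   isp = InputSplitter()
    #   isp.check_complete('a = 1')               # -> ('complete', None)
    #   isp.check_complete('for i in range(3):')  # -> ('incomplete', 4)
    #   isp.check_complete('a = "1')              # -> ('invalid', None)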
def push(self, lines):
"""Push one or more lines of input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (``_is_complete``), so it
can be queried at any time.
"""
self._store(lines)
source = self.source
# Before calling _compile(), reset the code object to None so that if an
# exception is raised in compilation, we don't mislead by having
# inconsistent code/source attributes.
self.code, self._is_complete = None, None
self._is_invalid = False
# Honor termination lines properly
if source.endswith('\\\n'):
return False
self._update_indent(lines)
try:
with warnings.catch_warnings():
warnings.simplefilter('error', SyntaxWarning)
self.code = self._compile(source, symbol="exec")
# Invalid syntax can produce any of a number of different errors from
# inside the compiler, so we have to catch them all. Syntax errors
# immediately produce a 'ready' block, so the invalid Python can be
# sent to the kernel for evaluation with possible ipython
# special-syntax conversion.
except (SyntaxError, OverflowError, ValueError, TypeError,
MemoryError, SyntaxWarning):
self._is_complete = True
self._is_invalid = True
else:
# Compilation didn't produce any exceptions (though it may not have
# given a complete code object)
self._is_complete = self.code is not None
return self._is_complete
def push_accepts_more(self):
"""Return whether a block of interactive input can accept more input.
This method is meant to be used by line-oriented frontends, who need to
guess whether a block is complete or not based solely on prior and
current input lines. The InputSplitter considers it has a complete
interactive block and will not accept more input when either:
* A SyntaxError is raised
* The code is complete and consists of a single line or a single
non-compound statement
* The code is complete and has a blank line at the end
If the current input produces a syntax error, this method immediately
returns False but does *not* raise the syntax error exception, as
typically clients will want to send invalid syntax to an execution
backend which might convert the invalid syntax into valid Python via
one of the dynamic IPython mechanisms.
"""
# With incomplete input, unconditionally accept more
# A syntax error also sets _is_complete to True - see push()
if not self._is_complete:
# print("Not complete") # debug
return True
# The user can make any (complete) input execute by leaving a blank
# line
last_line = self.source.splitlines()[-1]
if (not last_line) or last_line.isspace():
# print("Blank line") # debug
return False
# If there's just a single line or AST node, and we're flush left, as is
# the case after a simple statement such as 'a=1', we want to execute it
# straight away.
if self.indent_spaces == 0:
if len(self.source.splitlines()) <= 1:
return False
try:
code_ast = ast.parse(u''.join(self._buffer))
except Exception:
# print("Can't parse AST") # debug
return False
else:
if len(code_ast.body) == 1 and \
not hasattr(code_ast.body[0], 'body'):
# print("Simple statement") # debug
return False
# General fallback - accept more code
return True
#------------------------------------------------------------------------
# Private interface
#------------------------------------------------------------------------
def _find_indent(self, line):
"""Compute the new indentation level for a single line.
Parameters
----------
line : str
A single new line of non-whitespace, non-comment Python input.
Returns
-------
indent_spaces : int
New value for the indent level (it may be equal to self.indent_spaces
if indentation doesn't change.
full_dedent : boolean
Whether the new line causes a full flush-left dedent.
"""
indent_spaces = self.indent_spaces
full_dedent = self._full_dedent
inisp = num_ini_spaces(line)
if inisp < indent_spaces:
indent_spaces = inisp
if indent_spaces <= 0:
# print 'Full dedent in text',self.source # dbg
full_dedent = True
if line.rstrip()[-1] == ':':
indent_spaces += 4
elif dedent_re.match(line):
indent_spaces -= 4
if indent_spaces <= 0:
full_dedent = True
# Safety
if indent_spaces < 0:
indent_spaces = 0
# print 'safety' # dbg
return indent_spaces, full_dedent
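    # Illustrative behaviour (assuming a fresh instance with indent_spaces == 0):
    #
    #   isp = InputSplitter()
    #   isp._find_indent('if True:')      # -> (4, False), a trailing ':' opens a block
    #   isp.indent_spaces = 4
    #   isp._find_indent('    return x')  # -> (0, True), 'return' dedents to flush-left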
def _update_indent(self, lines):
for line in remove_comments(lines).splitlines():
if line and not line.isspace():
self.indent_spaces, self._full_dedent = self._find_indent(line)
def _store(self, lines, buffer=None, store='source'):
"""Store one or more lines of input.
If input lines are not newline-terminated, a newline is automatically
appended."""
if buffer is None:
buffer = self._buffer
if lines.endswith('\n'):
buffer.append(lines)
else:
buffer.append(lines + '\n')
setattr(self, store, self._set_source(buffer))
def _set_source(self, buffer):
return u''.join(buffer)
class IPythonInputSplitter(InputSplitter):
"""An input splitter that recognizes all of IPython's special syntax."""
# String with raw, untransformed input.
source_raw = ''
# Flag to track when a transformer has stored input that it hasn't given
# back yet.
transformer_accumulating = False
# Flag to track when assemble_python_lines has stored input that it hasn't
# given back yet.
within_python_line = False
# Private attributes
# List with lines of raw input accumulated so far.
_buffer_raw = None
def __init__(self, line_input_checker=True, physical_line_transforms=None,
logical_line_transforms=None, python_line_transforms=None):
super(IPythonInputSplitter, self).__init__()
self._buffer_raw = []
self._validate = True
if physical_line_transforms is not None:
self.physical_line_transforms = physical_line_transforms
else:
self.physical_line_transforms = [
leading_indent(),
classic_prompt(),
ipy_prompt(),
strip_encoding_cookie(),
cellmagic(end_on_blank_line=line_input_checker),
]
self.assemble_logical_lines = assemble_logical_lines()
if logical_line_transforms is not None:
self.logical_line_transforms = logical_line_transforms
else:
self.logical_line_transforms = [
help_end(),
escaped_commands(),
assign_from_magic(),
assign_from_system(),
]
self.assemble_python_lines = assemble_python_lines()
if python_line_transforms is not None:
self.python_line_transforms = python_line_transforms
else:
# We don't use any of these at present
self.python_line_transforms = []
@property
def transforms(self):
"Quick access to all transformers."
return self.physical_line_transforms + \
[self.assemble_logical_lines] + self.logical_line_transforms + \
[self.assemble_python_lines] + self.python_line_transforms
@property
def transforms_in_use(self):
"""Transformers, excluding logical line transformers if we're in a
Python line."""
t = self.physical_line_transforms[:]
if not self.within_python_line:
t += [self.assemble_logical_lines] + self.logical_line_transforms
return t + [self.assemble_python_lines] + self.python_line_transforms
def reset(self):
"""Reset the input buffer and associated state."""
super(IPythonInputSplitter, self).reset()
self._buffer_raw[:] = []
self.source_raw = ''
self.transformer_accumulating = False
self.within_python_line = False
for t in self.transforms:
try:
t.reset()
except SyntaxError:
# Nothing that calls reset() expects to handle transformer
# errors
pass
def flush_transformers(self):
def _flush(transform, outs):
"""yield transformed lines
always strings, never None
transform: the current transform
outs: an iterable of previously transformed inputs.
Each may be multiline, which will be passed
one line at a time to transform.
"""
for out in outs:
for line in out.splitlines():
# push one line at a time
tmp = transform.push(line)
if tmp is not None:
yield tmp
# reset the transform
tmp = transform.reset()
if tmp is not None:
yield tmp
out = []
for t in self.transforms_in_use:
out = _flush(t, out)
out = list(out)
if out:
self._store('\n'.join(out))
def raw_reset(self):
"""Return raw input only and perform a full reset.
"""
out = self.source_raw
self.reset()
return out
def source_reset(self):
try:
self.flush_transformers()
return self.source
finally:
self.reset()
def push_accepts_more(self):
if self.transformer_accumulating:
return True
else:
return super(IPythonInputSplitter, self).push_accepts_more()
def transform_cell(self, cell):
"""Process and translate a cell of input.
"""
self.reset()
try:
self.push(cell)
self.flush_transformers()
return self.source
finally:
self.reset()
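    # A small usage sketch (illustrative only): feed one cell of IPython syntax
    # and get back plain Python with the special syntax rewritten into
    # get_ipython() calls (e.g. .system(...) for '!' escapes and .magic(...) for
    # line magics in this generation of the transformers).
    #
    #   isp = IPythonInputSplitter()
    #   py_src = isp.transform_cell('!ls\n%time x = 1\n')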
def push(self, lines):
"""Push one or more lines of IPython input.
This stores the given lines and returns a status code indicating
whether the code forms a complete Python block or not, after processing
all input lines for special IPython syntax.
Any exceptions generated in compilation are swallowed, but if an
exception was produced, the method returns True.
Parameters
----------
lines : string
One or more lines of Python input.
Returns
-------
is_complete : boolean
True if the current input source (the result of the current input
plus prior inputs) forms a complete Python execution block. Note that
this value is also stored as a private attribute (_is_complete), so it
can be queried at any time.
"""
# We must ensure all input is pure unicode
lines = cast_unicode(lines, self.encoding)
# ''.splitlines() --> [], but we need to push the empty line to transformers
lines_list = lines.splitlines()
if not lines_list:
lines_list = ['']
# Store raw source before applying any transformations to it. Note
# that this must be done *after* the reset() call that would otherwise
# flush the buffer.
self._store(lines, self._buffer_raw, 'source_raw')
for line in lines_list:
out = self.push_line(line)
return out
def push_line(self, line):
buf = self._buffer
def _accumulating(dbg):
# print(dbg)
self.transformer_accumulating = True
return False
for transformer in self.physical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
if not self.within_python_line:
line = self.assemble_logical_lines.push(line)
if line is None:
return _accumulating('acc logical line')
for transformer in self.logical_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
line = self.assemble_python_lines.push(line)
if line is None:
self.within_python_line = True
return _accumulating('acc python line')
else:
self.within_python_line = False
for transformer in self.python_line_transforms:
line = transformer.push(line)
if line is None:
return _accumulating(transformer)
# print("transformers clear") #debug
self.transformer_accumulating = False
return super(IPythonInputSplitter, self).push(line)
| mit | 4,596,734,243,576,576,500 | 34.10275 | 91 | 0.583155 | false |
galaxy001/monkey-videos | build/build.py | 6 | 1309 | #!/usr/bin/env python3
import os
handlers = [
'56.js',
'acfun.js',
'bilibili.js',
'cntv.js',
'funshion.js',
'ifeng.js',
'iqiyi.js',
'letv.js',
'justing.js',
'ku6.js',
'netease.js',
'pps.js',
'sina.js',
'sohu.js',
'tucao.js',
'tudou.js',
'wasu.js',
'weiqitv.js',
'youku.js',
'youtube.js',
]
def build():
def write_includes():
for handler in handlers:
name = os.path.splitext(handler)[0]
header_path = os.path.join('../', name, header)
out.write(open(header_path).read())
def write_handlers():
for handler in handlers:
name = os.path.splitext(handler)[0]
handler_path = os.path.join('../', name, handler)
print(handler_path)
out.write(open(handler_path).read())
header = 'header.js'
input_path = 'monkey-videos.js'
output_path = '../monkey-videos.user.js'
with open(input_path) as fh, open(output_path, 'w') as out:
for line in fh:
if line.startswith('IMPORT_INCLUDES'):
write_includes()
elif line.startswith('IMPORT_HANDLERS'):
write_handlers()
else:
out.write(line)
if __name__ == '__main__':
build()
| gpl-3.0 | 6,570,973,924,532,993,000 | 21.964912 | 63 | 0.514133 | false |
nishantjr/pjproject | tests/pjsua/mod_media_playrec.py | 40 | 2921 | # $Id$
# PLAYFILE -> RECFILE:
# Input file is played and is recorded to output, then compare them.
# Useful to test clock rate compatibility and resample quality
# null-audio
# port 1: wav file input xxxxxx.clock_rate.wav, e.g: test1.8.wav
# port 2: wav file output xxxxxx.clock_rate.wav, e.g: res1.8.wav
# wav input must be more than 3 seconds long
import time
import imp
import sys
import re
import subprocess
import inc_const as const
from inc_cfg import *
# Load configuration
cfg_file = imp.load_source("cfg_file", ARGS[1])
# WAV similarity calculator
COMPARE_WAV_EXE = ""
if sys.platform.find("win32")!=-1:
COMPARE_WAV_EXE = "tools/cmp_wav.exe"
G_INUNIX = False
else:
COMPARE_WAV_EXE = "tools/cmp_wav"
G_INUNIX = True
# Threshold: degradation is considered too high when the result is lower than this value
COMPARE_THRESHOLD = 2
# COMPARE params
input_filename = "" # Input filename
output_filename = "" # Output filename
# Test body function
def test_func(t):
global input_filename
global output_filename
endpt = t.process[0]
# Get input file name
input_filename = re.compile(const.MEDIA_PLAY_FILE).search(endpt.inst_param.arg).group(1)
endpt.trace("Input file = " + input_filename)
# Get output file name
output_filename = re.compile(const.MEDIA_REC_FILE).search(endpt.inst_param.arg).group(1)
endpt.trace("Output file = " + output_filename)
# Find appropriate clock rate for the input file
clock_rate = re.compile(".+(\.\d+\.wav)$").match(output_filename).group(1)
if (clock_rate==None):
endpt.trace("Cannot compare input & output, incorrect output filename format")
return
input_filename = re.sub("\.\d+\.wav$", clock_rate, input_filename)
endpt.trace("WAV file to be compared with output = " + input_filename)
# Connect input-output file
endpt.sync_stdout()
endpt.send("cc 1 2")
endpt.expect(const.MEDIA_CONN_PORT_SUCCESS)
# Wait
time.sleep(3)
endpt.sync_stdout()
# Disconnect input-output file
endpt.send("cd 1 2")
endpt.expect(const.MEDIA_DISCONN_PORT_SUCCESS)
# Post body function
def post_func(t):
global input_filename
global output_filename
endpt = t.process[0]
# Check WAV similarity
fullcmd = COMPARE_WAV_EXE + " " + input_filename + " " + output_filename + " " + "3000"
endpt.trace("Popen " + fullcmd)
cmp_proc = subprocess.Popen(fullcmd, shell=G_INUNIX, stdout=subprocess.PIPE, universal_newlines=True)
	# Parse similarity output
line = cmp_proc.stdout.readline()
mo_sim_val = re.match(".+=\s+(\d+)", line)
if (mo_sim_val == None):
raise TestError("Error comparing WAV files")
return
	# Evaluate the similarity value as a number, so the comparison against
	# COMPARE_THRESHOLD below is numeric rather than string-vs-int.
	sim_val = int(mo_sim_val.group(1))
	if (sim_val >= COMPARE_THRESHOLD):
		endpt.trace("WAV similarity = " + str(sim_val))
	else:
		raise TestError("WAV degraded heavily, similarity = " + str(sim_val))
# Here where it all comes together
test = cfg_file.test_param
test.test_func = test_func
test.post_func = post_func
| gpl-2.0 | -6,874,661,052,290,806,000 | 26.046296 | 102 | 0.709688 | false |
anusornc/vitess | py/vtdb/vtdb_logger.py | 7 | 3102 | # Copyright 2014, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
import logging
# VtdbLogger's methods are called whenever something worth noting happens.
# The default behavior of the class is to log using the logging module.
# Registering a new implementation allows the client code to report the
# conditions to any custom reporting mechanism.
#
# We use this in the following cases:
# - error reporting (an exception happened)
# - performance logging (calls to other services took that long)
class VtdbLogger(object):
#
# topology callbacks
#
# topo_keyspace_fetch is called when we successfully get a SrvKeyspace object.
def topo_keyspace_fetch(self, keyspace_name, topo_rtt):
logging.info("Fetched keyspace %s from topo_client in %f secs", keyspace_name, topo_rtt)
# topo_empty_keyspace_list is called when we get an empty list of
# keyspaces from topo server.
def topo_empty_keyspace_list(self):
logging.warning('topo_empty_keyspace_list')
# topo_bad_keyspace_data is called if we generated an exception
# when reading a keyspace. This is within an exception handler.
def topo_bad_keyspace_data(self, keyspace_name):
logging.exception('error getting or parsing keyspace data for %s',
keyspace_name)
# topo_zkocc_error is called whenever we get a zkocc.ZkOccError
# when trying to resolve an endpoint.
def topo_zkocc_error(self, message, db_key, e):
logging.warning('topo_zkocc_error: %s for %s: %s', message, db_key, e)
# topo_exception is called whenever we get an exception when trying
# to resolve an endpoint (that is not a zkocc.ZkOccError, these get
# handled by topo_zkocc_error).
def topo_exception(self, message, db_key, e):
logging.warning('topo_exception: %s for %s: %s', message, db_key, e)
#
# vtclient callbacks
#
# Integrity Error is called when mysql throws an IntegrityError on a query.
# This is thrown by both vtclient and vtgatev2.
def integrity_error(self, e):
logging.warning('integrity_error: %s', e)
# vtclient_exception is called when a FatalError is raised by
# vtclient (that error is sent back to the application, the retries
# happen at a lower level). e can be one of
# dbexceptions.{RetryError, FatalError, TxPoolFull}
# or a more generic dbexceptions.OperationalError
def vtclient_exception(self, keyspace_name, shard_name, db_type, e):
logging.warning('vtclient_exception for %s.%s.%s: %s', keyspace_name,
shard_name, db_type, e)
#
# vtgatev2 callbacks
#
# vtgatev2_exception is called when we get an exception talking to vtgate.
def vtgatev2_exception(self, e):
logging.warning('vtgatev2_exception: %s', e)
def log_private_data(self, private_data):
logging.info("Additional exception data %s", private_data)
# registration mechanism for VtdbLogger
__vtdb_logger = VtdbLogger()
def register_vtdb_logger(logger):
global __vtdb_logger
__vtdb_logger = logger
def get_logger():
return __vtdb_logger
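# A minimal sketch of plugging in a custom logger; `metrics` below is a
# hypothetical reporting client, not part of this module:
#
#   class MetricsVtdbLogger(VtdbLogger):
#     def integrity_error(self, e):
#       metrics.increment('vtdb.integrity_error')
#
#   register_vtdb_logger(MetricsVtdbLogger())
#   get_logger().integrity_error(Exception('duplicate entry'))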
| bsd-3-clause | 8,214,899,324,859,678,000 | 34.25 | 92 | 0.720503 | false |
shiminasai/ciat_plataforma | guias_cacao/migrations/0009_auto__add_actividadescierre__add_cierremanejo2__add_cierreplaga2__add_.py | 3 | 113662 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ActividadesCierre'
db.create_table(u'guias_cacao_actividadescierre', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)),
))
db.send_create_signal(u'guias_cacao', ['ActividadesCierre'])
# Adding model 'CierreManejo2'
db.create_table(u'guias_cacao_cierremanejo2', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('campo1', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=11)),
('campo2', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('campo3', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreManejo2'])
# Adding model 'CierrePlaga2'
db.create_table(u'guias_cacao_cierreplaga2', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tema', self.gf('django.db.models.fields.IntegerField')()),
('monilla', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('mazorca', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('zompopos', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierrePlaga2'])
# Adding model 'CierreManejo1'
db.create_table(u'guias_cacao_cierremanejo1', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('campo1', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=11)),
('campo2', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('campo3', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreManejo1'])
# Adding model 'CierreManejo6'
db.create_table(u'guias_cacao_cierremanejo6', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('campo1', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=11)),
('campo2', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('campo3', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreManejo6'])
# Adding model 'CierreManejo7'
db.create_table(u'guias_cacao_cierremanejo7', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('campo1', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('campo2', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('campo3', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreManejo7'])
# Adding model 'CierreManejo4'
db.create_table(u'guias_cacao_cierremanejo4', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('campo1', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=11)),
('campo2', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=15)),
('campo3', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=15)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreManejo4'])
# Adding model 'CierreManejo5'
db.create_table(u'guias_cacao_cierremanejo5', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('campo1', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=11)),
('campo2', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('campo3', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreManejo5'])
# Adding model 'CierreConocimiento1'
db.create_table(u'guias_cacao_cierreconocimiento1', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tema', self.gf('django.db.models.fields.IntegerField')()),
('criollas', self.gf('django.db.models.fields.IntegerField')()),
('forastero', self.gf('django.db.models.fields.IntegerField')()),
('trinitaria', self.gf('django.db.models.fields.IntegerField')()),
('hibridos', self.gf('django.db.models.fields.IntegerField')()),
('clones', self.gf('django.db.models.fields.IntegerField')()),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreConocimiento1'])
# Adding model 'CierreConocimiento3'
db.create_table(u'guias_cacao_cierreconocimiento3', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tema', self.gf('django.db.models.fields.IntegerField')()),
('criollas', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('forastero', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('trinitaria', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('hibridos', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('clones', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreConocimiento3'])
# Adding model 'CierreConocimiento2'
db.create_table(u'guias_cacao_cierreconocimiento2', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tema', self.gf('django.db.models.fields.IntegerField')()),
('criollas', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('forastero', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('trinitaria', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('hibridos', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('clones', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreConocimiento2'])
# Adding model 'CierrePlaga3'
db.create_table(u'guias_cacao_cierreplaga3', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tema', self.gf('django.db.models.fields.IntegerField')()),
('monilla', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=3)),
('mazorca', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=3)),
('zompopos', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=3)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierrePlaga3'])
# Adding model 'CierrePlaga1'
db.create_table(u'guias_cacao_cierreplaga1', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tema', self.gf('django.db.models.fields.IntegerField')()),
('monilla', self.gf('django.db.models.fields.FloatField')()),
('mazorca', self.gf('django.db.models.fields.FloatField')()),
('zompopos', self.gf('django.db.models.fields.FloatField')()),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierrePlaga1'])
# Adding model 'CierreSuelo3'
db.create_table(u'guias_cacao_cierresuelo3', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tema', self.gf('django.db.models.fields.IntegerField')()),
('abono', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('hojarasca', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('organico', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreSuelo3'])
# Adding model 'CierreSuelo2'
db.create_table(u'guias_cacao_cierresuelo2', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tema', self.gf('django.db.models.fields.IntegerField')()),
('abono', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('hojarasca', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('organico', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreSuelo2'])
# Adding model 'CierreSuelo1'
db.create_table(u'guias_cacao_cierresuelo1', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tema', self.gf('django.db.models.fields.IntegerField')()),
('abono', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('hojarasca', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('organico', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreSuelo1'])
# Adding model 'ManejosCierre'
db.create_table(u'guias_cacao_manejoscierre', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=250)),
))
db.send_create_signal(u'guias_cacao', ['ManejosCierre'])
# Adding model 'CierreCosto1'
db.create_table(u'guias_cacao_cierrecosto1', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('costo', self.gf('django.db.models.fields.FloatField')()),
('area', self.gf('django.db.models.fields.FloatField')()),
('tipo', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=9)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreCosto1'])
# Adding model 'CierreManejo'
db.create_table(u'guias_cacao_cierremanejo', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('manejo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.ManejosCierre'])),
('reposo', self.gf('django.db.models.fields.IntegerField')()),
('crecimiento', self.gf('django.db.models.fields.IntegerField')()),
('floracion', self.gf('django.db.models.fields.IntegerField')()),
('cosecha', self.gf('django.db.models.fields.IntegerField')()),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreManejo'])
# Adding model 'CierreActividad'
db.create_table(u'guias_cacao_cierreactividad', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('actividad', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.ActividadesCierre'])),
('meses', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=23)),
('familiar', self.gf('django.db.models.fields.FloatField')()),
('contratada', self.gf('django.db.models.fields.FloatField')()),
('insumo', self.gf('django.db.models.fields.CharField')(max_length=250)),
('costo', self.gf('django.db.models.fields.FloatField')()),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreActividad'])
# Adding model 'CierreCicloTrabajo'
db.create_table(u'guias_cacao_cierreciclotrabajo', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pregunta1', self.gf('django.db.models.fields.IntegerField')()),
('pregunta2', self.gf('django.db.models.fields.IntegerField')()),
('pregunta3', self.gf('django.db.models.fields.IntegerField')()),
('pregunta4', self.gf('django.db.models.fields.IntegerField')()),
('pregunta5', self.gf('django.db.models.fields.IntegerField')()),
('pregunta6', self.gf('django.db.models.fields.IntegerField')()),
('pregunta7', self.gf('django.db.models.fields.IntegerField')()),
('pregunta8', self.gf('django.db.models.fields.IntegerField')()),
('pregunta9', self.gf('django.db.models.fields.TextField')()),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreCicloTrabajo'])
# Adding model 'FichaCierre'
db.create_table(u'guias_cacao_fichacierre', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('productor', self.gf('django.db.models.fields.related.ForeignKey')(related_name='persona_productor_cierre', to=orm['mapeo.Persona'])),
('tecnico', self.gf('django.db.models.fields.related.ForeignKey')(related_name='persona_tecnico_cierre', to=orm['mapeo.Persona'])),
('fecha_visita', self.gf('django.db.models.fields.DateField')()),
))
db.send_create_signal(u'guias_cacao', ['FichaCierre'])
# Adding model 'CierreManejo3'
db.create_table(u'guias_cacao_cierremanejo3', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('campo1', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=11)),
('campo2', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('campo3', self.gf('multiselectfield.db.fields.MultiSelectField')(max_length=13)),
('ficha', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['guias_cacao.FichaCierre'])),
))
db.send_create_signal(u'guias_cacao', ['CierreManejo3'])
def backwards(self, orm):
# Deleting model 'ActividadesCierre'
db.delete_table(u'guias_cacao_actividadescierre')
# Deleting model 'CierreManejo2'
db.delete_table(u'guias_cacao_cierremanejo2')
# Deleting model 'CierrePlaga2'
db.delete_table(u'guias_cacao_cierreplaga2')
# Deleting model 'CierreManejo1'
db.delete_table(u'guias_cacao_cierremanejo1')
# Deleting model 'CierreManejo6'
db.delete_table(u'guias_cacao_cierremanejo6')
# Deleting model 'CierreManejo7'
db.delete_table(u'guias_cacao_cierremanejo7')
# Deleting model 'CierreManejo4'
db.delete_table(u'guias_cacao_cierremanejo4')
# Deleting model 'CierreManejo5'
db.delete_table(u'guias_cacao_cierremanejo5')
# Deleting model 'CierreConocimiento1'
db.delete_table(u'guias_cacao_cierreconocimiento1')
# Deleting model 'CierreConocimiento3'
db.delete_table(u'guias_cacao_cierreconocimiento3')
# Deleting model 'CierreConocimiento2'
db.delete_table(u'guias_cacao_cierreconocimiento2')
# Deleting model 'CierrePlaga3'
db.delete_table(u'guias_cacao_cierreplaga3')
# Deleting model 'CierrePlaga1'
db.delete_table(u'guias_cacao_cierreplaga1')
# Deleting model 'CierreSuelo3'
db.delete_table(u'guias_cacao_cierresuelo3')
# Deleting model 'CierreSuelo2'
db.delete_table(u'guias_cacao_cierresuelo2')
# Deleting model 'CierreSuelo1'
db.delete_table(u'guias_cacao_cierresuelo1')
# Deleting model 'ManejosCierre'
db.delete_table(u'guias_cacao_manejoscierre')
# Deleting model 'CierreCosto1'
db.delete_table(u'guias_cacao_cierrecosto1')
# Deleting model 'CierreManejo'
db.delete_table(u'guias_cacao_cierremanejo')
# Deleting model 'CierreActividad'
db.delete_table(u'guias_cacao_cierreactividad')
# Deleting model 'CierreCicloTrabajo'
db.delete_table(u'guias_cacao_cierreciclotrabajo')
# Deleting model 'FichaCierre'
db.delete_table(u'guias_cacao_fichacierre')
# Deleting model 'CierreManejo3'
db.delete_table(u'guias_cacao_cierremanejo3')
models = {
u'guias_cacao.accionesenfermedad': {
'Meta': {'object_name': 'AccionesEnfermedad'},
'cuantas_veces': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meses': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'plagas_acciones': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'realiza_manejo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.accionessombra': {
'Meta': {'object_name': 'AccionesSombra'},
'accion': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.actividadescierre': {
'Meta': {'object_name': 'ActividadesCierre'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'guias_cacao.analisispoda': {
'Meta': {'object_name': 'AnalisisPoda'},
'campo1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'campo2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'campo3': ('django.db.models.fields.IntegerField', [], {}),
'campo4': ('django.db.models.fields.IntegerField', [], {}),
'campo5': ('django.db.models.fields.IntegerField', [], {}),
'campo6': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.analisissombra': {
'Meta': {'object_name': 'AnalisisSombra'},
'Problema': ('django.db.models.fields.IntegerField', [], {}),
'arreglo': ('django.db.models.fields.IntegerField', [], {}),
'calidad_hojarasca': ('django.db.models.fields.IntegerField', [], {}),
'competencia': ('django.db.models.fields.IntegerField', [], {}),
'densidad': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
'forma_copa': ('django.db.models.fields.IntegerField', [], {}),
'hojarasca': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.aumentarsombra': {
'Meta': {'object_name': 'AumentarSombra'},
'cambiando': ('django.db.models.fields.IntegerField', [], {}),
'cambiando_cuales': ('django.db.models.fields.CharField', [], {'max_length': '350'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'que_parte': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'sembrando': ('django.db.models.fields.IntegerField', [], {}),
'sembrando_cuales': ('django.db.models.fields.CharField', [], {'max_length': '350'}),
'todo': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.cierreactividad': {
'Meta': {'object_name': 'CierreActividad'},
'actividad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.ActividadesCierre']"}),
'contratada': ('django.db.models.fields.FloatField', [], {}),
'costo': ('django.db.models.fields.FloatField', [], {}),
'familiar': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'insumo': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'meses': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'})
},
u'guias_cacao.cierreciclotrabajo': {
'Meta': {'object_name': 'CierreCicloTrabajo'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pregunta1': ('django.db.models.fields.IntegerField', [], {}),
'pregunta2': ('django.db.models.fields.IntegerField', [], {}),
'pregunta3': ('django.db.models.fields.IntegerField', [], {}),
'pregunta4': ('django.db.models.fields.IntegerField', [], {}),
'pregunta5': ('django.db.models.fields.IntegerField', [], {}),
'pregunta6': ('django.db.models.fields.IntegerField', [], {}),
'pregunta7': ('django.db.models.fields.IntegerField', [], {}),
'pregunta8': ('django.db.models.fields.IntegerField', [], {}),
'pregunta9': ('django.db.models.fields.TextField', [], {})
},
u'guias_cacao.cierreconocimiento1': {
'Meta': {'object_name': 'CierreConocimiento1'},
'clones': ('django.db.models.fields.IntegerField', [], {}),
'criollas': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
'forastero': ('django.db.models.fields.IntegerField', [], {}),
'hibridos': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tema': ('django.db.models.fields.IntegerField', [], {}),
'trinitaria': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.cierreconocimiento2': {
'Meta': {'object_name': 'CierreConocimiento2'},
'clones': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'criollas': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
'forastero': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'hibridos': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tema': ('django.db.models.fields.IntegerField', [], {}),
'trinitaria': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'})
},
u'guias_cacao.cierreconocimiento3': {
'Meta': {'object_name': 'CierreConocimiento3'},
'clones': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'criollas': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
'forastero': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'hibridos': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tema': ('django.db.models.fields.IntegerField', [], {}),
'trinitaria': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'})
},
u'guias_cacao.cierrecosto1': {
'Meta': {'object_name': 'CierreCosto1'},
'area': ('django.db.models.fields.FloatField', [], {}),
'costo': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tipo': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'})
},
u'guias_cacao.cierremanejo': {
'Meta': {'object_name': 'CierreManejo'},
'cosecha': ('django.db.models.fields.IntegerField', [], {}),
'crecimiento': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
'floracion': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manejo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.ManejosCierre']"}),
'reposo': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.cierremanejo1': {
'Meta': {'object_name': 'CierreManejo1'},
'campo1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'campo2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'campo3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cierremanejo2': {
'Meta': {'object_name': 'CierreManejo2'},
'campo1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'campo2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'campo3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cierremanejo3': {
'Meta': {'object_name': 'CierreManejo3'},
'campo1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'campo2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'campo3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cierremanejo4': {
'Meta': {'object_name': 'CierreManejo4'},
'campo1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'campo2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'campo3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cierremanejo5': {
'Meta': {'object_name': 'CierreManejo5'},
'campo1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'campo2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'campo3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cierremanejo6': {
'Meta': {'object_name': 'CierreManejo6'},
'campo1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'campo2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'campo3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cierremanejo7': {
'Meta': {'object_name': 'CierreManejo7'},
'campo1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'campo2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'campo3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cierreplaga1': {
'Meta': {'object_name': 'CierrePlaga1'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mazorca': ('django.db.models.fields.FloatField', [], {}),
'monilla': ('django.db.models.fields.FloatField', [], {}),
'tema': ('django.db.models.fields.IntegerField', [], {}),
'zompopos': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.cierreplaga2': {
'Meta': {'object_name': 'CierrePlaga2'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mazorca': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'monilla': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'tema': ('django.db.models.fields.IntegerField', [], {}),
'zompopos': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'})
},
u'guias_cacao.cierreplaga3': {
'Meta': {'object_name': 'CierrePlaga3'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mazorca': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '3'}),
'monilla': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '3'}),
'tema': ('django.db.models.fields.IntegerField', [], {}),
'zompopos': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '3'})
},
u'guias_cacao.cierresuelo1': {
'Meta': {'object_name': 'CierreSuelo1'},
'abono': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
'hojarasca': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organico': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'tema': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.cierresuelo2': {
'Meta': {'object_name': 'CierreSuelo2'},
'abono': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
'hojarasca': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organico': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'tema': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.cierresuelo3': {
'Meta': {'object_name': 'CierreSuelo3'},
'abono': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCierre']"}),
'hojarasca': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organico': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'tema': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.cobertura1': {
'Meta': {'object_name': 'Cobertura1'},
'cobertura': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cobertura2': {
'Meta': {'object_name': 'Cobertura2'},
'cobertura': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cobertura3': {
'Meta': {'object_name': 'Cobertura3'},
'cobertura': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cosechaanalisis': {
'Meta': {'object_name': 'CosechaAnalisis'},
'analisis1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'analisis2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '5'}),
'analisis3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCosecha']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cosechaareaplantas': {
'Meta': {'object_name': 'CosechaAreaPlantas'},
'area': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCosecha']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plantas': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.cosechaconversacion1': {
'Meta': {'object_name': 'CosechaConversacion1'},
'conversacion1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '7'}),
'conversacion2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '7'}),
'conversacion3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'conversacion4': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '7'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCosecha']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cosechaconversacion2': {
'Meta': {'object_name': 'CosechaConversacion2'},
'conversacion5': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'conversacion6': ('django.db.models.fields.FloatField', [], {}),
'conversacion7': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '5'}),
'conversacion8': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCosecha']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.cosechamesescosecha': {
'Meta': {'object_name': 'CosechaMesesCosecha'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCosecha']"}),
'floracion': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mes': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.cosechamesesfloracion': {
'Meta': {'object_name': 'CosechaMesesFloracion'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCosecha']"}),
'floracion': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mes': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.cosechapunto1': {
'Meta': {'object_name': 'CosechaPunto1'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCosecha']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mazorcas': ('django.db.models.fields.IntegerField', [], {}),
'planta_1': ('django.db.models.fields.FloatField', [], {}),
'planta_10': ('django.db.models.fields.FloatField', [], {}),
'planta_2': ('django.db.models.fields.FloatField', [], {}),
'planta_3': ('django.db.models.fields.FloatField', [], {}),
'planta_4': ('django.db.models.fields.FloatField', [], {}),
'planta_5': ('django.db.models.fields.FloatField', [], {}),
'planta_6': ('django.db.models.fields.FloatField', [], {}),
'planta_7': ('django.db.models.fields.FloatField', [], {}),
'planta_8': ('django.db.models.fields.FloatField', [], {}),
'planta_9': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.cosechapunto2': {
'Meta': {'object_name': 'CosechaPunto2'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCosecha']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mazorcas': ('django.db.models.fields.IntegerField', [], {}),
'planta_1': ('django.db.models.fields.FloatField', [], {}),
'planta_10': ('django.db.models.fields.FloatField', [], {}),
'planta_2': ('django.db.models.fields.FloatField', [], {}),
'planta_3': ('django.db.models.fields.FloatField', [], {}),
'planta_4': ('django.db.models.fields.FloatField', [], {}),
'planta_5': ('django.db.models.fields.FloatField', [], {}),
'planta_6': ('django.db.models.fields.FloatField', [], {}),
'planta_7': ('django.db.models.fields.FloatField', [], {}),
'planta_8': ('django.db.models.fields.FloatField', [], {}),
'planta_9': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.cosechapunto3': {
'Meta': {'object_name': 'CosechaPunto3'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaCosecha']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mazorcas': ('django.db.models.fields.IntegerField', [], {}),
'planta_1': ('django.db.models.fields.FloatField', [], {}),
'planta_10': ('django.db.models.fields.FloatField', [], {}),
'planta_2': ('django.db.models.fields.FloatField', [], {}),
'planta_3': ('django.db.models.fields.FloatField', [], {}),
'planta_4': ('django.db.models.fields.FloatField', [], {}),
'planta_5': ('django.db.models.fields.FloatField', [], {}),
'planta_6': ('django.db.models.fields.FloatField', [], {}),
'planta_7': ('django.db.models.fields.FloatField', [], {}),
'planta_8': ('django.db.models.fields.FloatField', [], {}),
'planta_9': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.datosanalisis': {
'Meta': {'object_name': 'DatosAnalisis'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'unidad': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'valor_critico': ('django.db.models.fields.FloatField', [], {}),
'variable': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'guias_cacao.especies': {
'Meta': {'object_name': 'Especies'},
'foto': (u'sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'g_altura': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'g_ancho': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'g_diametro': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'm_altura': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'm_ancho': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'm_diametro': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'nombre_cientifico': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'p_altura': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'p_ancho': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'p_diametro': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tipo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tipo_uso': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9', 'null': 'True', 'blank': 'True'})
},
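        # Hedged sketch (assumption: the app's actual models.py is not part of this
        # migration): the frozen 'guias_cacao.especies' entry above corresponds
        # roughly to a model definition like
        #
        #     class Especies(models.Model):
        #         nombre = models.CharField(max_length=250)
        #         nombre_cientifico = models.CharField(max_length=250, null=True, blank=True)
        #         foto = sorl.thumbnail.fields.ImageField(max_length=100, null=True, blank=True)
        #         tipo_uso = multiselectfield.db.fields.MultiSelectField(max_length=9, null=True, blank=True)
        #         # ... plus the FloatField / IntegerField measurements listed above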
u'guias_cacao.fichacierre': {
'Meta': {'object_name': 'FichaCierre'},
'fecha_visita': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_productor_cierre'", 'to': u"orm['mapeo.Persona']"}),
'tecnico': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_tecnico_cierre'", 'to': u"orm['mapeo.Persona']"})
},
u'guias_cacao.fichacosecha': {
'Meta': {'object_name': 'FichaCosecha'},
'fecha_visita': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_productor_cosecha'", 'to': u"orm['mapeo.Persona']"}),
'tecnico': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_tecnico_cosecha'", 'to': u"orm['mapeo.Persona']"})
},
u'guias_cacao.fichapiso': {
'Meta': {'object_name': 'FichaPiso'},
'fecha_visita': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_productor_piso'", 'to': u"orm['mapeo.Persona']"}),
'tecnico': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_tecnico_piso'", 'to': u"orm['mapeo.Persona']"})
},
u'guias_cacao.fichaplaga': {
'Meta': {'object_name': 'FichaPlaga'},
'fecha_visita': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_productor_plaga'", 'to': u"orm['mapeo.Persona']"}),
'tecnico': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_tecnico_plaga'", 'to': u"orm['mapeo.Persona']"})
},
u'guias_cacao.fichapoda': {
'Meta': {'object_name': 'FichaPoda'},
'fecha_visita': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'setproductor'", 'to': u"orm['mapeo.Persona']"}),
'tecnico': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'settecnico'", 'to': u"orm['mapeo.Persona']"})
},
u'guias_cacao.fichasaf': {
'Meta': {'object_name': 'FichaSaf'},
'fecha_visita': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_productor_saf'", 'to': u"orm['mapeo.Persona']"}),
'tecnico': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_tecnico_saf'", 'to': u"orm['mapeo.Persona']"})
},
u'guias_cacao.fichasombra': {
'Meta': {'object_name': 'FichaSombra'},
'fecha_visita': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_productor'", 'to': u"orm['mapeo.Persona']"}),
'tecnico': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_tecnico'", 'to': u"orm['mapeo.Persona']"})
},
u'guias_cacao.fichasuelo': {
'Meta': {'object_name': 'FichaSuelo'},
'fecha_visita': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_productor_suelo'", 'to': u"orm['mapeo.Persona']"}),
'tecnico': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_tecnico_suelo'", 'to': u"orm['mapeo.Persona']"})
},
u'guias_cacao.fichavivero': {
'Meta': {'object_name': 'FichaVivero'},
'fecha_visita': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_productor_vivero'", 'to': u"orm['mapeo.Persona']"}),
'tecnico': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'persona_tecnico_vivero'", 'to': u"orm['mapeo.Persona']"})
},
u'guias_cacao.foto1': {
'Meta': {'object_name': 'Foto1'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
'foto': (u'sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.foto2': {
'Meta': {'object_name': 'Foto2'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
'foto': (u'sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.foto3': {
'Meta': {'object_name': 'Foto3'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
'foto': (u'sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.manejopoda': {
'Meta': {'object_name': 'ManejoPoda'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
'formacion': ('django.db.models.fields.IntegerField', [], {}),
'herramientas': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.manejoscierre': {
'Meta': {'object_name': 'ManejosCierre'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'guias_cacao.manejosombra': {
'Meta': {'object_name': 'ManejoSombra'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
'formacion': ('django.db.models.fields.IntegerField', [], {}),
'herramientas': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.observacionpunto1': {
'Meta': {'object_name': 'ObservacionPunto1'},
'cinco': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cuatro': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dies': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ocho': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planta': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'seis': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'siete': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uno': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.observacionpunto1nivel': {
'Meta': {'object_name': 'ObservacionPunto1Nivel'},
'cinco': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cuatro': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dies': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ocho': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planta': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'siete': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uno': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.observacionpunto2': {
'Meta': {'object_name': 'ObservacionPunto2'},
'cinco': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cuatro': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dies': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ocho': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planta': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'seis': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'siete': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uno': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.observacionpunto2nivel': {
'Meta': {'object_name': 'ObservacionPunto2Nivel'},
'cinco': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cuatro': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dies': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ocho': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planta': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'siete': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uno': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.observacionpunto3': {
'Meta': {'object_name': 'ObservacionPunto3'},
'cinco': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cuatro': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dies': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ocho': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planta': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'seis': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'siete': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uno': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.observacionpunto3nivel': {
'Meta': {'object_name': 'ObservacionPunto3Nivel'},
'cinco': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cuatro': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dies': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ocho': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planta': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'siete': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'tres': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uno': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.orientacion': {
'Meta': {'object_name': 'Orientacion'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
'fuentes': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.pisopunto1': {
'Meta': {'object_name': 'PisoPunto1'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPiso']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'punto1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'punto2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'})
},
u'guias_cacao.pisopunto10': {
'Meta': {'object_name': 'PisoPunto10'},
'equipo': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPiso']"}),
'formacion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.pisopunto3': {
'Meta': {'object_name': 'PisoPunto3'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPiso']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manejo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'meses': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'realiza': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'veces': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.pisopunto4': {
'Meta': {'object_name': 'PisoPunto4'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPiso']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manejo': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'})
},
u'guias_cacao.pisopunto5': {
'Meta': {'object_name': 'PisoPunto5'},
'conteo': ('django.db.models.fields.FloatField', [], {}),
'estado': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPiso']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.pisopunto6': {
'Meta': {'object_name': 'PisoPunto6'},
'estado': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPiso']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maleza': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'manejo': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '5'})
},
u'guias_cacao.pisopunto7': {
'Meta': {'object_name': 'PisoPunto7'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPiso']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manejo': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'sombra': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '7'}),
'suelo': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'})
},
u'guias_cacao.pisopunto8': {
'Meta': {'object_name': 'PisoPunto8'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPiso']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meses': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'parte': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'piso': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.plagasenfermedad': {
'Meta': {'object_name': 'PlagasEnfermedad'},
'dano': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plagas': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'promedio': ('django.db.models.fields.FloatField', [], {}),
'visto': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.problemasprincipales': {
'Meta': {'object_name': 'ProblemasPrincipales'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'observadas': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '27'}),
'principales': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '27'}),
'situacion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.productosvivero': {
'Meta': {'object_name': 'ProductosVivero'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'guias_cacao.punto1': {
'Meta': {'object_name': 'Punto1'},
'especie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.Especies']"}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
'grande': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mediana': ('django.db.models.fields.FloatField', [], {}),
'pequena': ('django.db.models.fields.FloatField', [], {}),
'tipo': ('django.db.models.fields.IntegerField', [], {}),
'tipo_de_copa': ('django.db.models.fields.IntegerField', [], {}),
'uso': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto1a': {
'Meta': {'object_name': 'Punto1A'},
'cinco': ('django.db.models.fields.FloatField', [], {}),
'cuatro': ('django.db.models.fields.FloatField', [], {}),
'diez': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.FloatField', [], {}),
'ocho': ('django.db.models.fields.FloatField', [], {}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.FloatField', [], {}),
'siete': ('django.db.models.fields.FloatField', [], {}),
'tres': ('django.db.models.fields.FloatField', [], {}),
'uno': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.punto1b': {
'Meta': {'object_name': 'Punto1B'},
'cinco': ('django.db.models.fields.IntegerField', [], {}),
'cuatro': ('django.db.models.fields.IntegerField', [], {}),
'diez': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {}),
'ocho': ('django.db.models.fields.IntegerField', [], {}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.IntegerField', [], {}),
'siete': ('django.db.models.fields.IntegerField', [], {}),
'tres': ('django.db.models.fields.IntegerField', [], {}),
'uno': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto1c': {
'Meta': {'object_name': 'Punto1C'},
'cinco': ('django.db.models.fields.IntegerField', [], {}),
'cuatro': ('django.db.models.fields.IntegerField', [], {}),
'diez': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {}),
'ocho': ('django.db.models.fields.IntegerField', [], {}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.IntegerField', [], {}),
'siete': ('django.db.models.fields.IntegerField', [], {}),
'tres': ('django.db.models.fields.IntegerField', [], {}),
'uno': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto1suelo': {
'Meta': {'object_name': 'Punto1Suelo'},
'abonos': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limitante': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'orientacion': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'uso_parcela': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto2': {
'Meta': {'object_name': 'Punto2'},
'especie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.Especies']"}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
'grande': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mediana': ('django.db.models.fields.FloatField', [], {}),
'pequena': ('django.db.models.fields.FloatField', [], {}),
'tipo': ('django.db.models.fields.IntegerField', [], {}),
'tipo_de_copa': ('django.db.models.fields.IntegerField', [], {}),
'uso': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto2a': {
'Meta': {'object_name': 'Punto2A'},
'cinco': ('django.db.models.fields.FloatField', [], {}),
'cuatro': ('django.db.models.fields.FloatField', [], {}),
'diez': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.FloatField', [], {}),
'ocho': ('django.db.models.fields.FloatField', [], {}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.FloatField', [], {}),
'siete': ('django.db.models.fields.FloatField', [], {}),
'tres': ('django.db.models.fields.FloatField', [], {}),
'uno': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.punto2asuelo': {
'Meta': {'object_name': 'Punto2ASuelo'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {}),
'respuesta': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto2b': {
'Meta': {'object_name': 'Punto2B'},
'cinco': ('django.db.models.fields.IntegerField', [], {}),
'cuatro': ('django.db.models.fields.IntegerField', [], {}),
'diez': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {}),
'ocho': ('django.db.models.fields.IntegerField', [], {}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.IntegerField', [], {}),
'siete': ('django.db.models.fields.IntegerField', [], {}),
'tres': ('django.db.models.fields.IntegerField', [], {}),
'uno': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto2bsuelo': {
'Meta': {'object_name': 'Punto2BSuelo'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {}),
'respuesta': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto2c': {
'Meta': {'object_name': 'Punto2C'},
'cinco': ('django.db.models.fields.IntegerField', [], {}),
'cuatro': ('django.db.models.fields.IntegerField', [], {}),
'diez': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {}),
'ocho': ('django.db.models.fields.IntegerField', [], {}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.IntegerField', [], {}),
'siete': ('django.db.models.fields.IntegerField', [], {}),
'tres': ('django.db.models.fields.IntegerField', [], {}),
'uno': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto3': {
'Meta': {'object_name': 'Punto3'},
'especie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.Especies']"}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
'grande': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mediana': ('django.db.models.fields.FloatField', [], {}),
'pequena': ('django.db.models.fields.FloatField', [], {}),
'tipo': ('django.db.models.fields.IntegerField', [], {}),
'tipo_de_copa': ('django.db.models.fields.IntegerField', [], {}),
'uso': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto3a': {
'Meta': {'object_name': 'Punto3A'},
'cinco': ('django.db.models.fields.FloatField', [], {}),
'cuatro': ('django.db.models.fields.FloatField', [], {}),
'diez': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.FloatField', [], {}),
'ocho': ('django.db.models.fields.FloatField', [], {}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.FloatField', [], {}),
'siete': ('django.db.models.fields.FloatField', [], {}),
'tres': ('django.db.models.fields.FloatField', [], {}),
'uno': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.punto3b': {
'Meta': {'object_name': 'Punto3B'},
'cinco': ('django.db.models.fields.IntegerField', [], {}),
'cuatro': ('django.db.models.fields.IntegerField', [], {}),
'diez': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {}),
'ocho': ('django.db.models.fields.IntegerField', [], {}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.IntegerField', [], {}),
'siete': ('django.db.models.fields.IntegerField', [], {}),
'tres': ('django.db.models.fields.IntegerField', [], {}),
'uno': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto3c': {
'Meta': {'object_name': 'Punto3C'},
'cinco': ('django.db.models.fields.IntegerField', [], {}),
'cuatro': ('django.db.models.fields.IntegerField', [], {}),
'diez': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dos': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPoda']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nueve': ('django.db.models.fields.IntegerField', [], {}),
'ocho': ('django.db.models.fields.IntegerField', [], {}),
'plantas': ('django.db.models.fields.IntegerField', [], {}),
'seis': ('django.db.models.fields.IntegerField', [], {}),
'siete': ('django.db.models.fields.IntegerField', [], {}),
'tres': ('django.db.models.fields.IntegerField', [], {}),
'uno': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto3suelopunto1': {
'Meta': {'object_name': 'Punto3SueloPunto1'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {}),
'respuesta': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto3suelopunto2': {
'Meta': {'object_name': 'Punto3SueloPunto2'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {}),
'respuesta': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto3suelopunto3': {
'Meta': {'object_name': 'Punto3SueloPunto3'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {}),
'respuesta': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto4suelo': {
'Meta': {'object_name': 'Punto4Suelo'},
'area': ('django.db.models.fields.FloatField', [], {}),
'densidad': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.punto4suelocosecha': {
'Meta': {'object_name': 'Punto4SueloCosecha'},
'cantidad': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'producto': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto4suelosi': {
'Meta': {'object_name': 'Punto4SueloSI'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto5sueloabonos': {
'Meta': {'object_name': 'Punto5SueloAbonos'},
'cantidad': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
'frecuencia': ('django.db.models.fields.FloatField', [], {}),
'humedad': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meses': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'tipo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.TipoFertilizantes']"}),
'unidad': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto6analisissuelo': {
'Meta': {'object_name': 'Punto6AnalisisSuelo'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valor': ('django.db.models.fields.FloatField', [], {}),
'variable': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.DatosAnalisis']"})
},
u'guias_cacao.punto6plagas': {
'Meta': {'object_name': 'Punto6Plagas'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manejo': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'observaciones': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'sombra': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.punto7plagas': {
'Meta': {'object_name': 'Punto7Plagas'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manejo': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'meses': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'parte': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.punto7tiposuelo': {
'Meta': {'object_name': 'Punto7TipoSuelo'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto8suelopropuesta': {
'Meta': {'object_name': 'Punto8SueloPropuesta'},
'cantidad': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
'frecuencia': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meses': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'tipo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.TipoFertilizantes']"}),
'unidad': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto8y9plagas': {
'Meta': {'object_name': 'Punto8y9Plagas'},
'equipos': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaPlaga']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'guias_cacao.punto9desbalance': {
'Meta': {'object_name': 'Punto9Desbalance'},
'acciones': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '1'}),
'donde': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limitaciones': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto9drenaje': {
'Meta': {'object_name': 'Punto9Drenaje'},
'acciones': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '5'}),
'donde': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limitaciones': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto9enfermedades': {
'Meta': {'object_name': 'Punto9Enfermedades'},
'acciones': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '5'}),
'donde': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limitaciones': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto9erosion': {
'Meta': {'object_name': 'Punto9Erosion'},
'acciones': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'donde': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limitaciones': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto9exceso': {
'Meta': {'object_name': 'Punto9Exceso'},
'acciones': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '1'}),
'donde': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limitaciones': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.punto9nutrientes': {
'Meta': {'object_name': 'Punto9Nutrientes'},
'acciones': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '3'}),
'donde': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limitaciones': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.puntoasuelo': {
'Meta': {'object_name': 'PuntoASuelo'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {}),
'respuesta': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.puntobsuelo': {
'Meta': {'object_name': 'PuntoBSuelo'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSuelo']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opcion': ('django.db.models.fields.IntegerField', [], {}),
'respuesta': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.reducirsombra': {
'Meta': {'object_name': 'ReducirSombra'},
'eliminando': ('django.db.models.fields.IntegerField', [], {}),
'eliminando_cuales': ('django.db.models.fields.CharField', [], {'max_length': '350'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSombra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'poda': ('django.db.models.fields.IntegerField', [], {}),
'poda_cuales': ('django.db.models.fields.CharField', [], {'max_length': '350'}),
'que_parte': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'todo': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.safconversacion1': {
'Meta': {'object_name': 'SafConversacion1'},
'conversacion1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '7'}),
'conversacion2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.safconversacion2': {
'Meta': {'object_name': 'SafConversacion2'},
'conversacion3': ('django.db.models.fields.IntegerField', [], {}),
'conversacion4': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.safconversacion3': {
'Meta': {'object_name': 'SafConversacion3'},
'conversacion3': ('django.db.models.fields.IntegerField', [], {}),
'conversacion4': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.safconversacion4': {
'Meta': {'object_name': 'SafConversacion4'},
'conversacion5': ('django.db.models.fields.IntegerField', [], {}),
'conversacion6': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.safconversacion5': {
'Meta': {'object_name': 'SafConversacion5'},
'conversacion7': ('django.db.models.fields.IntegerField', [], {}),
'conversacion8': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.safconversacion6': {
'Meta': {'object_name': 'SafConversacion6'},
'conversacion10': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'conversacion11': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'conversacion9': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.safconversacion7': {
'Meta': {'object_name': 'SafConversacion7'},
'conversacion12': ('django.db.models.fields.IntegerField', [], {}),
'conversacion13': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'conversacion14': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.safconversacion8': {
'Meta': {'object_name': 'SafConversacion8'},
'conversacion15': ('django.db.models.fields.IntegerField', [], {}),
'conversacion16': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.safconversacion9': {
'Meta': {'object_name': 'SafConversacion9'},
'conversacion17': ('django.db.models.fields.IntegerField', [], {}),
'conversacion18': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'conversacion19': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'conversacion20': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.safobservaciones': {
'Meta': {'object_name': 'SafObservaciones'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'observacion1': ('django.db.models.fields.IntegerField', [], {}),
'observacion2': ('django.db.models.fields.FloatField', [], {}),
'observacion3': ('django.db.models.fields.FloatField', [], {}),
'observacion4': ('django.db.models.fields.FloatField', [], {}),
'observacion5': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.safobservaciones2': {
'Meta': {'object_name': 'SafObservaciones2'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'observacion1': ('django.db.models.fields.IntegerField', [], {}),
'observacion2': ('django.db.models.fields.IntegerField', [], {}),
'observacion3': ('django.db.models.fields.IntegerField', [], {}),
'observacion4': ('django.db.models.fields.IntegerField', [], {}),
'observacion5': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.safobservaciones3': {
'Meta': {'object_name': 'SafObservaciones3'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'observacion6': ('django.db.models.fields.IntegerField', [], {}),
'observacion7': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'})
},
u'guias_cacao.safobservaciones4': {
'Meta': {'object_name': 'SafObservaciones4'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'observacion10': ('django.db.models.fields.IntegerField', [], {}),
'observacion11': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'observacion8': ('django.db.models.fields.IntegerField', [], {}),
'observacion9': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.safobservacionpunto1': {
'Meta': {'object_name': 'SafObservacionPunto1'},
'cantidad': ('django.db.models.fields.FloatField', [], {}),
'especies': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.Especies']"}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
'frutas': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lena': ('django.db.models.fields.FloatField', [], {}),
'madera': ('django.db.models.fields.FloatField', [], {}),
'nutrientes': ('django.db.models.fields.FloatField', [], {}),
'sombra': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.safobservacionpunto2': {
'Meta': {'object_name': 'SafObservacionPunto2'},
'cantidad': ('django.db.models.fields.FloatField', [], {}),
'especies': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.Especies']"}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
'frutas': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lena': ('django.db.models.fields.FloatField', [], {}),
'madera': ('django.db.models.fields.FloatField', [], {}),
'nutrientes': ('django.db.models.fields.FloatField', [], {}),
'sombra': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.safobservacionpunto3': {
'Meta': {'object_name': 'SafObservacionPunto3'},
'cantidad': ('django.db.models.fields.FloatField', [], {}),
'especies': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.Especies']"}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaSaf']"}),
'frutas': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lena': ('django.db.models.fields.FloatField', [], {}),
'madera': ('django.db.models.fields.FloatField', [], {}),
'nutrientes': ('django.db.models.fields.FloatField', [], {}),
'sombra': ('django.db.models.fields.FloatField', [], {})
},
u'guias_cacao.tipofertilizantes': {
'Meta': {'object_name': 'TipoFertilizantes'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'guias_cacao.viveroconversacion2': {
'Meta': {'object_name': 'ViveroConversacion2'},
'conversacion10': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '5'}),
'conversacion11': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'conversacion12': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '5'}),
'conversacion7': ('django.db.models.fields.IntegerField', [], {}),
'conversacion8': ('django.db.models.fields.IntegerField', [], {}),
'conversacion9': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaVivero']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.vivieroanalisissituacion': {
'Meta': {'object_name': 'VivieroAnalisisSituacion'},
'analisis1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '5'}),
'analisis2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'analisis3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '15'}),
'analisis4': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaVivero']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.vivieroconversacion': {
'Meta': {'object_name': 'VivieroConversacion'},
'conversacion1': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '23'}),
'conversacion2': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '7'}),
'conversacion3': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '13'}),
'conversacion4': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '11'}),
'conversacion5': ('multiselectfield.db.fields.MultiSelectField', [], {'max_length': '9'}),
'conversacion6': ('django.db.models.fields.IntegerField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaVivero']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'guias_cacao.vivieroobservacion1': {
'Meta': {'object_name': 'VivieroObservacion1'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaVivero']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'observacion1': ('django.db.models.fields.FloatField', [], {}),
'observacion2': ('django.db.models.fields.FloatField', [], {}),
'observacion3': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.vivieroobservacion2': {
'Meta': {'object_name': 'VivieroObservacion2'},
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaVivero']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'observacion3': ('django.db.models.fields.IntegerField', [], {}),
'planta_1': ('django.db.models.fields.IntegerField', [], {}),
'planta_10': ('django.db.models.fields.IntegerField', [], {}),
'planta_2': ('django.db.models.fields.IntegerField', [], {}),
'planta_3': ('django.db.models.fields.IntegerField', [], {}),
'planta_4': ('django.db.models.fields.IntegerField', [], {}),
'planta_5': ('django.db.models.fields.IntegerField', [], {}),
'planta_6': ('django.db.models.fields.IntegerField', [], {}),
'planta_7': ('django.db.models.fields.IntegerField', [], {}),
'planta_8': ('django.db.models.fields.IntegerField', [], {}),
'planta_9': ('django.db.models.fields.IntegerField', [], {})
},
u'guias_cacao.vivieroobservacionproductos': {
'Meta': {'object_name': 'VivieroObservacionProductos'},
'cantidad': ('django.db.models.fields.FloatField', [], {}),
'ficha': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.FichaVivero']"}),
'frecuencia': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'producto': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['guias_cacao.ProductosVivero']"}),
'unidad': ('django.db.models.fields.IntegerField', [], {})
},
u'lugar.comunidad': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Comunidad'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'lugar.departamento': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Departamento'},
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.municipio': {
'Meta': {'ordering': "['departamento__nombre', 'nombre']", 'object_name': 'Municipio'},
'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.pais': {
'Meta': {'object_name': 'Pais'},
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'mapeo.persona': {
'Meta': {'object_name': 'Persona'},
'cedula': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'comunidad': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
'departamento': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'edad': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'sexo': ('django.db.models.fields.IntegerField', [], {}),
'tipo_persona': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['guias_cacao'] | mit | -1,066,311,567,676,605,200 | 68.903444 | 156 | 0.560064 | false |
flixcloud/flix_cloud-python | FlixCloud.py | 1 | 13916 | """
FlixCloud API Python Library
Version: 0.2
Author: Pawel Markowski <[email protected]>
"""
import xml.dom.minidom
import httplib
def xml_str(doc, tag, parentTag=None, debug=False):
"""Returns string from xml document from tag (and parentTag)"""
if parentTag:
try:
el = doc.getElementsByTagName(parentTag)[0]
except IndexError:
if debug:
print 'No such parent tag: %s' % parentTag
return None
doc = el
try:
result = doc.getElementsByTagName(tag)[0].firstChild.data.strip()
except IndexError:
if debug:
print 'No such tag: %s' % tag
return None
return result
def xml_int(doc, tag, parentTag=None, debug=False):
"""Returns int from xml document from tag (and parentTag)."""
if parentTag:
try:
el = doc.getElementsByTagName(parentTag)[0]
except IndexError:
if debug:
print 'No such parent tag: %s' % parentTag
return None
doc = el
try:
result = int(doc.getElementsByTagName(tag)[0].firstChild.data)
except IndexError:
if debug:
print 'No such tag: %s' % tag
return None
return result
class Job:
"""FlixCloud Job."""
def __init__(self, api_key, params=None):
#API url: https://www.flixcloud.com/jobs
self.api_base_url = 'www.flixcloud.com'
self.api_req_url = '/jobs'
self.api_key = api_key
self.recipe_id = None
self.recipe_name = None
self.input = None
self.output = None
self.watermark = None
self.notification_url = None
self.result = {}
self.errors = []
if params:
if 'recipe_id' in params:
self.recipe_id = str(params['recipe_id'])
elif 'recipe_name' in params:
self.recipe_name = params['recipe_name']
if 'input_url' in params:
self.set_input(params['input_url'],
params.get('input_user', None),
params.get('input_password', None))
if 'output_url' in params:
self.set_output(params['output_url'],
params.get('output_user', None),
params.get('output_password', None))
if 'watermark_url' in params:
self.set_watermark(params['watermark_url'],
params.get('watermark_user', None),
params.get('watermark_password', None))
if 'notification_url' in params:
self.set_notification_url(params['notification_url'])
if 'send' in params:
if params['send']:
self.send()
def set_input(self, url, user=None, password=None):
"""Sets input file data."""
self.input = JobInputFile(url, user, password)
def set_output(self, url, user=None, password=None):
"""Sets output file data."""
self.output = JobOutputFile(url, user, password)
def set_watermark(self, url, user=None, password=None):
"""Sets watermark file data."""
self.watermark = JobWatermarkFile(url, user, password)
def set_notification_url(self, notify_url):
"""Sets the notification url"""
self.notification_url = notify_url
def validate(self):
"""Checks if data are correct and ready to send."""
if not (self.recipe_id or self.recipe_name):
self.errors.append('Either recipe_id or recipe_name is required.')
if not self.api_key:
self.errors.append('API key is required.')
if not (self.input and self.output):
self.errors.append('Input and output files are required.')
#Validate files:
for jobFile in (self.input, self.output, self.watermark):
if jobFile:
if not jobFile.valid():
self.errors.extend(jobFile.errors)
if self.errors == []:
return True
else:
return False
def get_job_xml(self):
"""Create xml for FlixCloud job request."""
doc = xml.dom.minidom.Document()
api_req = doc.createElement('api-request')
doc.appendChild(api_req)
api_key = doc.createElement('api-key')
api_key.appendChild(doc.createTextNode(self.api_key))
api_req.appendChild(api_key)
if self.recipe_id:
recipe_id = doc.createElement('recipe-id')
recipe_id.appendChild(doc.createTextNode(str(self.recipe_id)))
api_req.appendChild(recipe_id)
elif self.recipe_name:
recipe_name = doc.createElement('recipe-name')
recipe_name.appendChild(doc.createTextNode(self.recipe_name))
api_req.appendChild(recipe_name)
if self.notification_url:
notify_url = doc.createElement('notification-url')
notify_url.appendChild(doc.createTextNode(self.notification_url))
api_req.appendChild(notify_url)
#file-locations node
file_locations = doc.createElement('file-locations')
#input node
if self.input:
input = doc.createElement('input')
input_url = doc.createElement('url')
input_url.appendChild(doc.createTextNode(self.input.url))
input.appendChild(input_url)
if self.input.user and self.input.password:
input_parameters = doc.createElement('parameters')
input_user = doc.createElement('user')
input_user.appendChild(doc.createTextNode(self.input.user))
input_password = doc.createElement('password')
input_password.appendChild(
doc.createTextNode(self.input.password))
input_parameters.appendChild(input_user)
input_parameters.appendChild(input_password)
input.appendChild(input_parameters)
file_locations.appendChild(input)
#output node
if self.output:
output = doc.createElement('output')
output_url = doc.createElement('url')
output_url.appendChild(doc.createTextNode(self.output.url))
output.appendChild(output_url)
if self.output.user and self.output.password:
output_parameters = doc.createElement('parameters')
output_user = doc.createElement('user')
output_user.appendChild(doc.createTextNode(self.output.user))
output_password = doc.createElement('password')
output_password.appendChild(
doc.createTextNode(self.output.password))
output_parameters.appendChild(output_user)
output_parameters.appendChild(output_password)
output.appendChild(output_parameters)
file_locations.appendChild(output)
#watermark node
if self.watermark:
watermark = doc.createElement('watermark')
watermark_url = doc.createElement('url')
watermark_url.appendChild(doc.createTextNode(self.watermark.url))
watermark.appendChild(watermark_url)
if self.watermark.user and self.watermark.password:
watermark_parameters = doc.createElement('parameters')
watermark_user = doc.createElement('user')
watermark_user.appendChild(
doc.createTextNode(self.watermark.user))
watermark_password = doc.createElement('password')
watermark_password.appendChild(
doc.createTextNode(self.watermark.password))
watermark_parameters.appendChild(watermark_user)
watermark_parameters.appendChild(watermark_password)
watermark.appendChild(watermark_parameters)
file_locations.appendChild(watermark)
api_req.appendChild(file_locations)
return doc.toprettyxml(encoding='UTF-8', indent=' ')
def send(self):
"""Send FlixCloud job request."""
self.success = False
if not self.validate():
print self.errors
return False
self.final_xml = self.get_job_xml()
#HTTPS connection
headers = {'Accept' : 'text/xml', 'Content-type' : 'application/xml'}
conn = httplib.HTTPSConnection(host=self.api_base_url)
#Send job request
try:
conn.request('POST', self.api_req_url, self.final_xml, headers)
except Exception, e:
            self.errors.append('Connection error: %s' % e.__str__())
return False
response = conn.getresponse()
self.result['code'] = response.status
self.result['reason'] = response.reason
self.result['data'] = response.read()
self.result['xml'] = xml.dom.minidom.parseString(self.result['data'])
if response.status == 201: #Success
self.success = True
self.set_job_data()
return True
else: #Failure
self.errors.append('Send error: %s, %s' %
(self.result['code'],
self.result['reason'])
)
for error in self.result['xml'].getElementsByTagName('error'):
self.errors.append(error.firstChild.data)
return False
def set_job_data(self):
"""Sets job's data if job was succesfully registered."""
self.id = xml_int(self.result['xml'], 'id')
self.initialized_job_at = xml_str(self.result['xml'],
'initialized-job-at')
class JobFile:
"""Base class for files (input, output, watermark objects)."""
def __init__(self, url, user=None, password=None):
self.name = 'Base'
self.url = url
self.user = user
self.password = password
self.errors = []
#From notification:
self.width = None
self.height = None
self.size = None
self.duration = None
self.cost = None
def valid(self):
"""Checks if file object is valid for use."""
if not self.url:
self.errors.append('%s: Url is required.' % self.name)
if (self.user and not self.password) \
or (self.password and not self.user):
self.errors.append('%s: Both user & password are required.' %
self.name)
if self.errors == []:
return True
else:
return False
class JobInputFile(JobFile):
"""Input file data."""
def __init__(self, *args, **kwargs):
JobFile.__init__(self, *args, **kwargs)
self.name = 'Input'
class JobOutputFile(JobFile):
"""Output file data."""
def __init__(self, *args, **kwargs):
JobFile.__init__(self, *args, **kwargs)
self.name = 'Output'
class JobWatermarkFile(JobFile):
"""Watermark file data."""
def __init__(self, *args, **kwargs):
JobFile.__init__(self, *args, **kwargs)
self.name = 'Watermark'
class JobNotification:
"""Notification about registered job."""
def __init__(self, msg):
#job
self.id = None
self.finished_job_at = None
self.initialized_job_at = None
self.recipe_id = None
self.recipe_name = None
self.state = None
self.error_message = None
#files
self.input_media_file = None
self.output_media_file = None
self.watermark = None
self.xml_msg = msg
self.parse_msg()
def parse_msg(self):
"""Parses xml notification and sets parameters."""
doc = xml.dom.minidom.parseString(self.xml_msg)
self.id = xml_int(doc, 'id')
self.initialized_job_at = xml_str(doc, 'initialized-job-at')
self.finished_job_at = xml_str(doc, 'finished-job-at')
self.recipe_id = xml_int(doc, 'recipe-id')
self.recipe_name = xml_str(doc, 'recipe-name')
self.state = xml_str(doc, 'state')
self.error_message = xml_str(doc, 'error-message')
#files data
try:
input_xml = doc.getElementsByTagName('input-media-file')[0]
self.input_media_file = JobInputFile(xml_str(input_xml, 'url'))
self.set_file_params(self.input_media_file, input_xml)
except IndexError:
print 'No input file defined.'
try:
output_xml = doc.getElementsByTagName('output-media-file')[0]
self.output_media_file = JobOutputFile(xml_str(output_xml, 'url'))
self.set_file_params(self.output_media_file, output_xml)
except IndexError:
print 'No output file defined.'
try:
watermark_xml = doc.getElementsByTagName('watermark-file')[0]
self.watermark_file = JobWatermarkFile(
xml_str(watermark_xml, 'url'))
self.set_file_params(self.watermark_file, watermark_xml)
except IndexError:
print 'No watermark file defined.'
def set_file_params(self, file, xml_doc):
"""Sets parameters for file according to received notification."""
file.width = xml_int(xml_doc, 'width')
file.height = xml_int(xml_doc, 'height')
file.size = xml_int(xml_doc, 'size')
file.duration = xml_int(xml_doc, 'duration')
file.cost = xml_int(xml_doc, 'cost')
def state(self):
"""Returns jobs state according to received notification."""
return self.state
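# Illustrative usage sketch (added for clarity; not part of the original library).
# The API key, recipe id and URLs below are placeholder values. This block only
# builds and prints the request XML locally; call job.send() to actually submit it.
if __name__ == '__main__':
    demo_job = Job('YOUR-API-KEY', {
        'recipe_id': 42,
        'input_url': 'http://example.com/input.mp4',
        'output_url': 'ftp://example.com/output.mp4',
        'output_user': 'user',
        'output_password': 'secret',
    })
    if demo_job.validate():
        print demo_job.get_job_xml()
    else:
        print demo_job.errors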
| mit | -7,951,541,377,709,118,000 | 34.958656 | 80 | 0.56374 | false |
ellipsis14/dolfin-adjoint | dolfin_adjoint/adjglobals.py | 1 | 3176 | import coeffstore
import expressions
import caching
import libadjoint
from dolfin_adjoint import backend
if backend.__name__ == "dolfin":
import lusolver
# Create the adjointer, the central object that records the forward solve
# as it happens.
adjointer = libadjoint.Adjointer()
mem_checkpoints = set()
disk_checkpoints = set()
adj_variables = coeffstore.CoeffStore()
def adj_start_timestep(time=0.0):
'''Dolfin does not supply us with information about timesteps, and so more information
is required from the user for certain features. This function should be called at the
start of the time loop with the initial time (defaults to 0).
See also: :py:func:`dolfin_adjoint.adj_inc_timestep`
'''
if not backend.parameters["adjoint"]["stop_annotating"]:
adjointer.time.start(time)
def adj_inc_timestep(time=None, finished=False):
'''Dolfin does not supply us with information about timesteps, and so more information
is required from the user for certain features. This function should be called at
the end of the time loop with two arguments:
- :py:data:`time` -- the time at the end of the timestep just computed
- :py:data:`finished` -- whether this is the final timestep.
With this information, complex functional expressions using the :py:class:`Functional` class
can be used.
The finished argument is necessary because the final step of a functional integration must perform
additional calculations.
See also: :py:func:`dolfin_adjoint.adj_start_timestep`
'''
if not backend.parameters["adjoint"]["stop_annotating"]:
adj_variables.increment_timestep()
if time is not None:
adjointer.time.next(time)
if finished:
adjointer.time.finish()
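# Illustrative sketch (comments only, not executed): how the two helpers above are
# typically called from a user's forward time loop. The names solve_timestep, t, dt
# and T are hypothetical placeholders and not part of this module.
#
#   adj_start_timestep(0.0)
#   t = 0.0
#   while t < T:
#       t += dt
#       solve_timestep()   # the annotated forward solve for one step
#       adj_inc_timestep(time=t, finished=(t >= T))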
# A dictionary that saves the functionspaces of all checkpoint variables that have been saved to disk
checkpoint_fs = {}
function_names = set()
def adj_check_checkpoints():
adjointer.check_checkpoints()
def adj_reset_cache():
if backend.parameters["adjoint"]["debug_cache"]:
backend.info_blue("Resetting solver cache")
caching.assembled_fwd_forms.clear()
caching.assembled_adj_forms.clear()
caching.lu_solvers.clear()
caching.localsolvers.clear()
caching.pis_fwd_to_tlm.clear()
caching.pis_fwd_to_adj.clear()
if backend.__name__ == "dolfin":
lusolver.lu_solvers = [None] * len(lusolver.lu_solvers)
lusolver.adj_lu_solvers = [None] * len(lusolver.adj_lu_solvers)
def adj_html(*args, **kwargs):
'''This routine dumps the current state of the adjglobals.adjointer to a HTML visualisation.
Use it like:
- adj_html("forward.html", "forward") # for the equations recorded on the forward run
- adj_html("adjoint.html", "adjoint") # for the equations to be assembled on the adjoint run
'''
return adjointer.to_html(*args, **kwargs)
def adj_reset():
'''Forget all annotation, and reset the entire dolfin-adjoint state.'''
adjointer.reset()
expressions.expression_attrs.clear()
adj_variables.__init__()
function_names.__init__()
adj_reset_cache()
backend.parameters["adjoint"]["stop_annotating"] = False
# Map from FunctionSpace to LUSolver that has factorised the fsp mass matrix
fsp_lu = {}
| lgpl-3.0 | 133,383,022,570,906,910 | 31.742268 | 101 | 0.729534 | false |
michele-mada/cv-eyetracking-project-2017 | py_eyetracker_v1.0/utils/histogram/iif.py | 1 | 1085 | from math import exp, sqrt
import numpy as np
from utils.histogram.cl_run_iif import CL_IIF, CL_IIF_BINID
from utils.histogram.lsh import num_bins
def illumination_invariant_features_hybrid(image, histogram, k=0.1, debug_ax=None):
width, height, nbins = np.shape(histogram)
fast_binid = CL_IIF_BINID()
fast_binid.load_program()
bp_mtx = fast_binid.compute(image, nbins)
b_mtx = np.ndarray((1,1, nbins))
b_mtx[0, 0,:] = np.arange(0, nbins)
b_mtx = np.tile(b_mtx, (width, height, 1))
    # construct pixel intensity matrix
i_mtx = np.repeat(image[:, :, np.newaxis], nbins, axis=2)
i_mtx *= 255
i_mtx = k * i_mtx
i_mtx[i_mtx < k] = k
# compute illumination invariant features
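    # In formula terms (matching the lines below): for a pixel p with LSH bin b_p and
    # clamped, scaled intensity i_p = max(k * 255 * I_p, k), the feature is
    #   F(p) = sum_b exp(-(b - b_p)^2 / (2 * i_p^2)) * H(p, b)
    # i.e. a Gaussian-weighted sum over the per-pixel histogram, wider for brighter pixels.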
X = -((b_mtx - bp_mtx) ** 2) / (2 * (i_mtx ** 2))
e_mtx = np.exp(X)
Ip_mtx = e_mtx * histogram
feature_img = np.sum(Ip_mtx, 2)
return feature_img
fast_iif = CL_IIF(num_bins=num_bins)
fast_iif.load_program()
def illumination_invariant_features_cl(image, histogram, k=0.1, debug_ax=None):
return fast_iif.compute(image, histogram, k) | mit | -2,282,565,088,396,295,200 | 28.351351 | 83 | 0.645161 | false |
yterauchi/primecloud-controller | auto-tool/auto-cli/lib/common/CommonUtils.py | 5 | 15572 | # -*- coding: utf-8 -*-
import subprocess
import sys
import re
import os
import json
import glob
import socket
from urlparse import urlparse
from ArgumentManager import ArgumentManager
from db.MysqlConnector import MysqlConnector
from ast import literal_eval
from sqlalchemy.sql.expression import and_
def checkDbConnection():
    # Test the DB connection
conn = MysqlConnector()
try:
sql = "SELECT 1 FROM dual"
result = conn.selectOne(sql)
return True
except Exception as e:
        return {'result':'1','message':"Failed to connect to the PCC database. Aborting."}
def getPlatformTypeByName(platformName):
conn = MysqlConnector()
try:
table = conn.getTable("PLATFORM")
except Exception as e:
return None
plData = conn.selectOne(table.select(table.c.PLATFORM_NAME==platformName))
if plData != None:
return plData["PLATFORM_TYPE"]
else:
return None
def getPlatformTypeByNo(platformNo):
conn = MysqlConnector()
try:
table = conn.getTable("PLATFORM")
except Exception as e:
return None
plData = conn.selectOne(table.select(table.c.PLATFORM_NO==platformNo))
if plData != None:
return plData["PLATFORM_TYPE"]
else:
return None
def getPlatformDataByName(platformName):
conn = MysqlConnector()
try:
table = conn.getTable("PLATFORM")
except Exception as e:
return None
plData = conn.selectOne(table.select(table.c.PLATFORM_NAME==platformName))
if plData != None:
return plData
else:
return None
def getPlatformDataByNo(platformNo):
conn = MysqlConnector()
try:
table = conn.getTable("PLATFORM")
except Exception as e:
return None
plData = conn.selectOne(table.select(table.c.PLATFORM_NO==platformNo))
if plData != None:
return plData
else:
return None
def getPlatformDataByIaas(iaasName):
conn = MysqlConnector()
try:
table = conn.getTable("PLATFORM")
except Exception as e:
return None
plData = conn.select(table.select(table.c.PLATFORM_TYPE==iaasName))
if plData != None:
return plData
else:
return None
def getImageDataByNo(imageNo):
conn = MysqlConnector()
try:
table = conn.getTable("IMAGE")
except Exception as e:
return None
imageData = conn.selectOne(table.select(table.c.IMAGE_NO==imageNo))
if imageData != None:
return imageData
else:
return None
def getImageDataByName(imageName):
conn = MysqlConnector()
try:
table = conn.getTable("IMAGE")
except Exception as e:
return None
imageData = conn.select(table.select(table.c.IMAGE_NAME==imageName))
if len(imageData) != 0:
return imageData
else:
return None
def getImageDataByNameAndPlatformNo(imageName, platformNo):
conn = MysqlConnector()
try:
table = conn.getTable("IMAGE")
except Exception as e:
return None
imageData = conn.selectOne(table.select(and_(table.c.IMAGE_NAME==imageName, table.c.PLATFORM_NO==platformNo)))
if imageData != None:
return imageData
else:
return None
def getComponentTypeNoByName(serviceName):
conn = MysqlConnector()
try:
table = conn.getTable("COMPONENT_TYPE")
except Exception as e:
return None
compTypeData = conn.selectOne(table.select(table.c.COMPONENT_TYPE_NAME==serviceName))
if compTypeData != None:
return str(compTypeData["COMPONENT_TYPE_NO"])
else:
return None
def getComponentTypeNameByNo(serviceNo):
conn = MysqlConnector()
try:
table = conn.getTable("COMPONENT_TYPE")
except Exception as e:
return None
compTypeData = conn.selectOne(table.select(table.c.COMPONENT_TYPE_NO==serviceNo))
if compTypeData != None:
return compTypeData["COMPONENT_TYPE_NAME"]
else:
return None
def getComponentTypeDataByName(serviceName):
conn = MysqlConnector()
try:
table = conn.getTable("COMPONENT_TYPE")
except Exception as e:
return None
compTypeData = conn.selectOne(table.select(table.c.COMPONENT_TYPE_NAME==serviceName))
if compTypeData != None:
return compTypeData
else:
return None
def getSelectablePlatformNameList():
conn = MysqlConnector()
tablePlatform = conn.getTable("PLATFORM")
platformData = conn.select(tablePlatform.select(tablePlatform.c.SELECTABLE=="1"))
    # Build platformNameList
platformNameList = []
for i in range(len(platformData)):
platformNameList.append(str(platformData[i]["PLATFORM_NAME"]))
return platformNameList
def getSelectableImageNoList():
conn = MysqlConnector()
tableImage = conn.getTable("IMAGE")
imageData = conn.select(tableImage.select(tableImage.c.SELECTABLE=="1"))
    # Build imageNameList
imageNameList = []
for i in range(len(imageData)):
imageNameList.append(str(imageData[i]["IMAGE_NO"]))
return imageNameList
def getSelectableStatus(selectable):
if 0 == selectable:
return "Disable"
elif 1 == selectable:
return "Enable"
else:
return False
def getMif(moduleName):
    # Set up the path
mifPath = "/opt/adc/pccrepo/" + moduleName + "/" + moduleName + ".json"
    # Check that the file exists
if False == os.path.exists(mifPath):
return None
else:
mifJsonFile = open(mifPath, 'r')
mif = mifJsonFile.read()
return json.loads(mif)
def checkArguments(method, paramDict):
result = checkRequiredArgs(method, paramDict)
if result != True:
return result + "が指定されていません。正しい値を指定して下さい。"
result = checkSupportedArgs(method, paramDict)
if result != True:
return "コマンドがサポートしない引数:" + result + " が指定されています。正しい値を指定して下さい。"
result = checkLengthArgs(paramDict)
if result != True:
return result[0] + "は" + str(result[1]) + "桁以内で入力して下さい。"
result = checkFormatArgs(paramDict)
if result != True:
if "halfAlpha" == result[1]:
return result[0] + "は半角英数記号で入力して下さい。"
elif "number" == result[1]:
return result[0] + "は数字で入力して下さい。"
elif "url" == result[1]:
return result[0] + "はURL形式(http://host:port/path または https://host:port/path)で入力して下さい。"
elif "boolean" == result[1]:
return result[0] + "は0または1で入力して下さい。"
return True
def checkRequiredArgs(method, paramDict):
argObjList = ArgumentManager.PlatformArgsList
argDict = None
for argObj in argObjList:
if method == argObj["method"]:
argDict = argObj
break
reqList = argDict["required"]
for reqObj in reqList:
if reqObj not in paramDict:
return reqObj
elif isBlank(paramDict[reqObj]):
return reqObj
return True
def checkSupportedArgs(method, paramDict):
argObjList = ArgumentManager.PlatformArgsList
argDict = None
for argObj in argObjList:
if method == argObj["method"]:
argDict = argObj
break
reqList = argDict["required"]
optList = argDict["optional"]
for key in paramDict.keys():
if key not in reqList:
if key not in optList:
return key
return True
def checkLengthArgs(paramDict):
argFormatList = ArgumentManager.PlatformArgsFormat
for key in paramDict.keys():
for argFormatDict in argFormatList:
if key == argFormatDict["argument"]:
value = paramDict[key]
length = argFormatDict["length"]
if not isBlank(value) and length != None :
if len(unicode(value)) > argFormatDict["length"]:
return [key, argFormatDict["length"]]
return True
def checkFormatArgs(paramDict):
argFormatList = ArgumentManager.PlatformArgsFormat
for key in paramDict.keys():
for argFormatDict in argFormatList:
if key == argFormatDict["argument"]:
value = paramDict[key]
format = argFormatDict["format"]
if not isBlank(value) and format != None :
if "halfAlpha" == format:
if not isHalfAlpha(value):
return [key, "halfAlpha"]
elif "number" == format:
if not isNumber(value):
return [key, "number"]
elif "url" == format:
if not isUrl(value):
return [key, "url"]
elif "boolean" == format:
if not isBoolean(value):
return [key, "boolean"]
return True
def checkIaasName(paramDict):
if "iaasName" not in paramDict:
return "iaasNameが指定されていません。正しい値を指定して下さい。"
iaasName = paramDict['iaasName']
if isBlank(iaasName):
return "iaasNameが指定されていません。正しい値を指定して下さい。"
elif not isSupportedIaas(iaasName):
return "iaasName:" + iaasName + "はPCCのサポート対象外です。"
else:
return True
def isBlank(value):
if value == None:
return True
elif len(value.strip()) == 0:
return True
else:
return False
def isHalfAlpha(value):
regexp = re.compile(r'^[a-zA-Z0-9!-/:-@\[-`{-~]+$')
if regexp.search(value) == None:
return False
    else:
        return True
def isNumber(value):
regexp = re.compile(r'^[0-9]+$')
if regexp.search(value) == None:
return False
else:
return True
def isUrl(value):
url = urlparse(value)
if isBlank(url.scheme):
return False
elif "http" != url.scheme and "https" != url.scheme:
return False
elif isBlank(url.netloc):
return False
else:
return True
def isBoolean(value):
if "0" == value or "1" == value:
return True
else:
return False
def isSupportedIaas(value):
conn = MysqlConnector()
try:
table = conn.getTable("IAAS_INFO")
except Exception as e:
return False
iaasNameData = conn.selectOne(table.select(table.c.IAAS_NAME==value))
if iaasNameData != None:
return True
else:
return False
def getSplittedUrl(value):
url = urlparse(value)
scheme = url.scheme
host = url.netloc.strip(':' + str(url.port))
port = url.port
path = url.path
return {'scheme':scheme, 'host':host, 'port':port, 'path':path}
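# Illustrative example (hypothetical URL): getSplittedUrl("http://pcc.example.com:8080/api")
# returns {'scheme': 'http', 'host': 'pcc.example.com', 'port': 8080, 'path': '/api'}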
def getConnector():
befFile = open("/opt/adc/conf/config.properties", "r")
user = None
pswd = None
url = None
dbname = None
lines = befFile.readlines()
for line in lines:
if "db.url" in line:
connText = line.split("=")[1].strip().rstrip("\n").replace("jdbc:", "")
url = urlparse(connText)
scheme = url.scheme
host = url.netloc.strip(':' + str(url.port))
port = url.port
path = url.path
def getConnectZabbixApi():
url = readZabbixProperties("zaburl")
user = readZabbixProperties("zabuser")
pswd = readZabbixProperties("zabpswd")
    # Return None if any property could not be retrieved
    if url is None or user is None or pswd is None:
        return None
    # Issue the curl command
param = '{"jsonrpc":"2.0", "method":"user.login", "params":{"user":"' + user + '", "password":"' + pswd + '"}, "auth":null, "id":0}'
ret = subprocess.Popen('curl -s -H "Accept: application/json" -H "Content-type: application/json" ' + url + "api_jsonrpc.php -i -X POST -d '" + param + "'", stdout=subprocess.PIPE, shell=True)
retJson = None
while True:
line = ret.stdout.readline()
if not line:
break
elif (line.startswith('{') and line.endswith('}')) or (line.startswith('[') and line.endswith(']')):
retJson = line.rstrip()
break
if retJson is None:
return None
retDic = json.loads(retJson)
if isinstance(retDic,list):
retDic = retDic[0]
if retDic.has_key("error"):
return None
elif retDic.has_key("result"):
return retDic["result"]
else:
return None
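# Usage note (illustrative): on success getConnectZabbixApi() returns the Zabbix API
# session token (the "result" of user.login), or None on failure. That token is what
# the auth parameter of getZabbixTemplate() and createZabbixTemplate() below expects.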
def getZabbixTemplate(auth, templateName):
    # Get the zabbix URL
    url = readZabbixProperties("zaburl")
    # Issue the curl command
param = '{"jsonrpc": "2.0","method": "template.get","params": {"output": "extend","filter": {"host": "' + templateName + '"}},"auth": "' + auth + '","id": 2}'
ret = subprocess.Popen('curl -s -H "Accept: application/json" -H "Content-type: application/json" ' + url + "api_jsonrpc.php -i -X POST -d '" + param + "'", stdout=subprocess.PIPE, shell=True)
retJson = None
while True:
line = ret.stdout.readline()
if not line:
break
elif (line.startswith('{') and line.endswith('}')) or (line.startswith('[') and line.endswith(']')):
retJson = line.rstrip()
break
if retJson is None:
return None
retDic = json.loads(retJson)
if isinstance(retDic,list):
retDic = retDic[0]
if retDic.has_key("error"):
return False
elif retDic.has_key("result") and len(retDic["result"]) > 0:
return True
else:
return False
def createZabbixTemplate(auth, templateName):
    # Get the zabbix URL
    url = readZabbixProperties("zaburl")
    # Issue the curl command
param = '{"jsonrpc": "2.0","method": "template.create","params": {"host": "' + templateName + '","groups": [{"groupid": "1"}]},"auth": "' + auth + '","id": 1}'
ret = subprocess.Popen('curl -s -H "Accept: application/json" -H "Content-type: application/json" ' + url + "api_jsonrpc.php -i -X POST -d '" + param + "'", stdout=subprocess.PIPE, shell=True)
retJson = None
while True:
line = ret.stdout.readline()
if not line:
break
elif (line.startswith('{') and line.endswith('}')) or (line.startswith('[') and line.endswith(']')):
retJson = line.rstrip()
break
if retJson is None:
return None
retDic = json.loads(retJson)
if isinstance(retDic,list):
retDic = retDic[0]
if retDic.has_key("error"):
return "zabbixテンプレート:" + templateName + "の登録に失敗しました。"
elif retDic.has_key("result"):
return True
else:
return False
def readZabbixProperties(name):
confFile = open("/opt/adc/conf/config.properties", "r")
lines = confFile.readlines()
for line in lines:
if "zaburl" == name and "zabbix.url" in line:
return line.split("=")[1].strip().rstrip("\n")
elif "zabuser" == name and "zabbix.username" in line:
return line.split("=")[1].strip().rstrip("\n")
elif "zabpswd" == name and "zabbix.password" in line:
return line.split("=")[1].strip().rstrip("\n")
return None
if __name__ == '__main__':
print(checkDbConnection())
| gpl-2.0 | -4,635,138,895,080,776,000 | 28.388235 | 197 | 0.602682 | false |
Jeff-Tian/mybnb | Python27/Lib/bsddb/test/test_sequence.py | 3 | 5410 | import unittest
import os
from test_all import db, test_support, get_new_environment_path, get_new_database_path
class DBSequenceTest(unittest.TestCase):
def setUp(self):
self.int_32_max = 0x100000000
self.homeDir = get_new_environment_path()
self.filename = "test"
self.dbenv = db.DBEnv()
self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL, 0666)
self.d = db.DB(self.dbenv)
self.d.open(self.filename, db.DB_BTREE, db.DB_CREATE, 0666)
def tearDown(self):
if hasattr(self, 'seq'):
self.seq.close()
del self.seq
if hasattr(self, 'd'):
self.d.close()
del self.d
if hasattr(self, 'dbenv'):
self.dbenv.close()
del self.dbenv
test_support.rmtree(self.homeDir)
def test_get(self):
self.seq = db.DBSequence(self.d, flags=0)
start_value = 10 * self.int_32_max
self.assertEqual(0xA00000000, start_value)
self.assertEqual(None, self.seq.initial_value(start_value))
self.assertEqual(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE))
self.assertEqual(start_value, self.seq.get(5))
self.assertEqual(start_value + 5, self.seq.get())
def test_remove(self):
self.seq = db.DBSequence(self.d, flags=0)
self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEqual(None, self.seq.remove(txn=None, flags=0))
del self.seq
def test_get_key(self):
self.seq = db.DBSequence(self.d, flags=0)
key = 'foo'
self.assertEqual(None, self.seq.open(key=key, txn=None, flags=db.DB_CREATE))
self.assertEqual(key, self.seq.get_key())
def test_get_dbp(self):
self.seq = db.DBSequence(self.d, flags=0)
self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEqual(self.d, self.seq.get_dbp())
def test_cachesize(self):
self.seq = db.DBSequence(self.d, flags=0)
cashe_size = 10
self.assertEqual(None, self.seq.set_cachesize(cashe_size))
self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEqual(cashe_size, self.seq.get_cachesize())
def test_flags(self):
self.seq = db.DBSequence(self.d, flags=0)
flag = db.DB_SEQ_WRAP;
self.assertEqual(None, self.seq.set_flags(flag))
self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEqual(flag, self.seq.get_flags() & flag)
def test_range(self):
self.seq = db.DBSequence(self.d, flags=0)
seq_range = (10 * self.int_32_max, 11 * self.int_32_max - 1)
self.assertEqual(None, self.seq.set_range(seq_range))
self.seq.initial_value(seq_range[0])
self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
self.assertEqual(seq_range, self.seq.get_range())
def test_stat(self):
self.seq = db.DBSequence(self.d, flags=0)
self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
stat = self.seq.stat()
for param in ('nowait', 'min', 'max', 'value', 'current',
'flags', 'cache_size', 'last_value', 'wait'):
self.assertTrue(param in stat, "parameter %s isn't in stat info" % param)
if db.version() >= (4,7) :
# This code checks a crash solved in Berkeley DB 4.7
def test_stat_crash(self) :
d=db.DB()
d.open(None,dbtype=db.DB_HASH,flags=db.DB_CREATE) # In RAM
seq = db.DBSequence(d, flags=0)
self.assertRaises(db.DBNotFoundError, seq.open,
key='id', txn=None, flags=0)
self.assertRaises(db.DBInvalidArgError, seq.stat)
d.close()
def test_64bits(self) :
# We don't use both extremes because they are problematic
value_plus=(1L<<63)-2
self.assertEqual(9223372036854775806L,value_plus)
value_minus=(-1L<<63)+1 # Two complement
self.assertEqual(-9223372036854775807L,value_minus)
self.seq = db.DBSequence(self.d, flags=0)
self.assertEqual(None, self.seq.initial_value(value_plus-1))
self.assertEqual(None, self.seq.open(key='id', txn=None,
flags=db.DB_CREATE))
self.assertEqual(value_plus-1, self.seq.get(1))
self.assertEqual(value_plus, self.seq.get(1))
self.seq.remove(txn=None, flags=0)
self.seq = db.DBSequence(self.d, flags=0)
self.assertEqual(None, self.seq.initial_value(value_minus))
self.assertEqual(None, self.seq.open(key='id', txn=None,
flags=db.DB_CREATE))
self.assertEqual(value_minus, self.seq.get(1))
self.assertEqual(value_minus+1, self.seq.get(1))
def test_multiple_close(self):
self.seq = db.DBSequence(self.d)
self.seq.close() # You can close a Sequence multiple times
self.seq.close()
self.seq.close()
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBSequenceTest))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| apache-2.0 | -9,132,995,850,701,812,000 | 37.779412 | 86 | 0.597412 | false |
rimbalinux/MSISDNArea | django/contrib/sessions/middleware.py | 3 | 1931 | import time
from django.conf import settings
from django.utils.cache import patch_vary_headers
from django.utils.http import cookie_date
from django.utils.importlib import import_module
class SessionMiddleware(object):
def process_request(self, request):
engine = import_module(settings.SESSION_ENGINE)
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
request.session = engine.SessionStore(session_key)
def process_response(self, request, response):
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
except AttributeError:
pass
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
if modified or settings.SESSION_SAVE_EVERY_REQUEST:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = cookie_date(expires_time)
# Save the session data and refresh the client cookie.
request.session.save()
response.set_cookie(settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None)
return response
| bsd-3-clause | 7,323,104,570,838,352,000 | 42.906977 | 79 | 0.584671 | false |
MartinHjelmare/home-assistant | homeassistant/components/google_assistant/trait.py | 1 | 42398 | """Implement the Google Smart Home traits."""
import logging
from homeassistant.components import (
binary_sensor,
camera,
cover,
group,
fan,
input_boolean,
media_player,
light,
lock,
scene,
script,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_DEVICE_CLASS,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
ATTR_ASSUMED_STATE,
STATE_UNKNOWN,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.util import color as color_util, temperature as temp_util
from .const import (
ERR_VALUE_OUT_OF_RANGE,
ERR_NOT_SUPPORTED,
ERR_FUNCTION_NOT_SUPPORTED,
ERR_CHALLENGE_NOT_SETUP,
CHALLENGE_ACK_NEEDED,
CHALLENGE_PIN_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
)
from .error import SmartHomeError, ChallengeNeeded
_LOGGER = logging.getLogger(__name__)
PREFIX_TRAITS = 'action.devices.traits.'
TRAIT_CAMERA_STREAM = PREFIX_TRAITS + 'CameraStream'
TRAIT_ONOFF = PREFIX_TRAITS + 'OnOff'
TRAIT_DOCK = PREFIX_TRAITS + 'Dock'
TRAIT_STARTSTOP = PREFIX_TRAITS + 'StartStop'
TRAIT_BRIGHTNESS = PREFIX_TRAITS + 'Brightness'
TRAIT_COLOR_SETTING = PREFIX_TRAITS + 'ColorSetting'
TRAIT_SCENE = PREFIX_TRAITS + 'Scene'
TRAIT_TEMPERATURE_SETTING = PREFIX_TRAITS + 'TemperatureSetting'
TRAIT_LOCKUNLOCK = PREFIX_TRAITS + 'LockUnlock'
TRAIT_FANSPEED = PREFIX_TRAITS + 'FanSpeed'
TRAIT_MODES = PREFIX_TRAITS + 'Modes'
TRAIT_OPENCLOSE = PREFIX_TRAITS + 'OpenClose'
TRAIT_VOLUME = PREFIX_TRAITS + 'Volume'
PREFIX_COMMANDS = 'action.devices.commands.'
COMMAND_ONOFF = PREFIX_COMMANDS + 'OnOff'
COMMAND_GET_CAMERA_STREAM = PREFIX_COMMANDS + 'GetCameraStream'
COMMAND_DOCK = PREFIX_COMMANDS + 'Dock'
COMMAND_STARTSTOP = PREFIX_COMMANDS + 'StartStop'
COMMAND_PAUSEUNPAUSE = PREFIX_COMMANDS + 'PauseUnpause'
COMMAND_BRIGHTNESS_ABSOLUTE = PREFIX_COMMANDS + 'BrightnessAbsolute'
COMMAND_COLOR_ABSOLUTE = PREFIX_COMMANDS + 'ColorAbsolute'
COMMAND_ACTIVATE_SCENE = PREFIX_COMMANDS + 'ActivateScene'
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
PREFIX_COMMANDS + 'ThermostatTemperatureSetpoint')
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
PREFIX_COMMANDS + 'ThermostatTemperatureSetRange')
COMMAND_THERMOSTAT_SET_MODE = PREFIX_COMMANDS + 'ThermostatSetMode'
COMMAND_LOCKUNLOCK = PREFIX_COMMANDS + 'LockUnlock'
COMMAND_FANSPEED = PREFIX_COMMANDS + 'SetFanSpeed'
COMMAND_MODES = PREFIX_COMMANDS + 'SetModes'
COMMAND_OPENCLOSE = PREFIX_COMMANDS + 'OpenClose'
COMMAND_SET_VOLUME = PREFIX_COMMANDS + 'setVolume'
COMMAND_VOLUME_RELATIVE = PREFIX_COMMANDS + 'volumeRelative'
TRAITS = []
def register_trait(trait):
"""Decorate a function to register a trait."""
TRAITS.append(trait)
return trait
def _google_temp_unit(units):
"""Return Google temperature unit."""
if units == TEMP_FAHRENHEIT:
return 'F'
return 'C'
class _Trait:
"""Represents a Trait inside Google Assistant skill."""
commands = []
def __init__(self, hass, state, config):
"""Initialize a trait for a state."""
self.hass = hass
self.state = state
self.config = config
def sync_attributes(self):
"""Return attributes for a sync request."""
raise NotImplementedError
def query_attributes(self):
"""Return the attributes of this trait for this entity."""
raise NotImplementedError
def can_execute(self, command, params):
"""Test if command can be executed."""
return command in self.commands
async def execute(self, command, data, params, challenge):
"""Execute a trait command."""
raise NotImplementedError
@register_trait
class BrightnessTrait(_Trait):
"""Trait to control brightness of a device.
https://developers.google.com/actions/smarthome/traits/brightness
"""
name = TRAIT_BRIGHTNESS
commands = [
COMMAND_BRIGHTNESS_ABSOLUTE
]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == light.DOMAIN:
return features & light.SUPPORT_BRIGHTNESS
return False
def sync_attributes(self):
"""Return brightness attributes for a sync request."""
return {}
def query_attributes(self):
"""Return brightness query attributes."""
domain = self.state.domain
response = {}
if domain == light.DOMAIN:
brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS)
if brightness is not None:
response['brightness'] = int(100 * (brightness / 255))
return response
async def execute(self, command, data, params, challenge):
"""Execute a brightness command."""
domain = self.state.domain
if domain == light.DOMAIN:
await self.hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_ON, {
ATTR_ENTITY_ID: self.state.entity_id,
light.ATTR_BRIGHTNESS_PCT: params['brightness']
}, blocking=True, context=data.context)
@register_trait
class CameraStreamTrait(_Trait):
"""Trait to stream from cameras.
https://developers.google.com/actions/smarthome/traits/camerastream
"""
name = TRAIT_CAMERA_STREAM
commands = [
COMMAND_GET_CAMERA_STREAM
]
stream_info = None
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == camera.DOMAIN:
return features & camera.SUPPORT_STREAM
return False
def sync_attributes(self):
"""Return stream attributes for a sync request."""
return {
'cameraStreamSupportedProtocols': [
"hls",
],
'cameraStreamNeedAuthToken': False,
'cameraStreamNeedDrmEncryption': False,
}
def query_attributes(self):
"""Return camera stream attributes."""
return self.stream_info or {}
async def execute(self, command, data, params, challenge):
"""Execute a get camera stream command."""
url = await self.hass.components.camera.async_request_stream(
self.state.entity_id, 'hls')
self.stream_info = {
'cameraStreamAccessUrl': self.hass.config.api.base_url + url
}
@register_trait
class OnOffTrait(_Trait):
"""Trait to offer basic on and off functionality.
https://developers.google.com/actions/smarthome/traits/onoff
"""
name = TRAIT_ONOFF
commands = [
COMMAND_ONOFF
]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain in (
group.DOMAIN,
input_boolean.DOMAIN,
switch.DOMAIN,
fan.DOMAIN,
light.DOMAIN,
media_player.DOMAIN,
)
def sync_attributes(self):
"""Return OnOff attributes for a sync request."""
return {}
def query_attributes(self):
"""Return OnOff query attributes."""
return {'on': self.state.state != STATE_OFF}
async def execute(self, command, data, params, challenge):
"""Execute an OnOff command."""
domain = self.state.domain
if domain == group.DOMAIN:
service_domain = HA_DOMAIN
service = SERVICE_TURN_ON if params['on'] else SERVICE_TURN_OFF
else:
service_domain = domain
service = SERVICE_TURN_ON if params['on'] else SERVICE_TURN_OFF
await self.hass.services.async_call(service_domain, service, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=True, context=data.context)
@register_trait
class ColorSettingTrait(_Trait):
"""Trait to offer color temperature functionality.
https://developers.google.com/actions/smarthome/traits/colortemperature
"""
name = TRAIT_COLOR_SETTING
commands = [
COMMAND_COLOR_ABSOLUTE
]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain != light.DOMAIN:
return False
return (features & light.SUPPORT_COLOR_TEMP or
features & light.SUPPORT_COLOR)
def sync_attributes(self):
"""Return color temperature attributes for a sync request."""
attrs = self.state.attributes
features = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
response = {}
if features & light.SUPPORT_COLOR:
response['colorModel'] = 'hsv'
if features & light.SUPPORT_COLOR_TEMP:
# Max Kelvin is Min Mireds K = 1000000 / mireds
# Min Kevin is Max Mireds K = 1000000 / mireds
response['colorTemperatureRange'] = {
'temperatureMaxK':
color_util.color_temperature_mired_to_kelvin(
attrs.get(light.ATTR_MIN_MIREDS)),
'temperatureMinK':
color_util.color_temperature_mired_to_kelvin(
attrs.get(light.ATTR_MAX_MIREDS)),
}
return response
def query_attributes(self):
"""Return color temperature query attributes."""
features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
color = {}
if features & light.SUPPORT_COLOR:
color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
if color_hs is not None:
color['spectrumHsv'] = {
'hue': color_hs[0],
'saturation': color_hs[1]/100,
'value': brightness/255,
}
if features & light.SUPPORT_COLOR_TEMP:
temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
# Some faulty integrations might put 0 in here, raising exception.
if temp == 0:
_LOGGER.warning('Entity %s has incorrect color temperature %s',
self.state.entity_id, temp)
elif temp is not None:
color['temperatureK'] = \
color_util.color_temperature_mired_to_kelvin(temp)
response = {}
if color:
response['color'] = color
return response
async def execute(self, command, data, params, challenge):
"""Execute a color temperature command."""
if 'temperature' in params['color']:
temp = color_util.color_temperature_kelvin_to_mired(
params['color']['temperature'])
min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]
if temp < min_temp or temp > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
"Temperature should be between {} and {}".format(min_temp,
max_temp))
await self.hass.services.async_call(
light.DOMAIN, SERVICE_TURN_ON, {
ATTR_ENTITY_ID: self.state.entity_id,
light.ATTR_COLOR_TEMP: temp,
}, blocking=True, context=data.context)
elif 'spectrumRGB' in params['color']:
# Convert integer to hex format and left pad with 0's till length 6
hex_value = "{0:06x}".format(params['color']['spectrumRGB'])
color = color_util.color_RGB_to_hs(
*color_util.rgb_hex_to_rgb_list(hex_value))
await self.hass.services.async_call(
light.DOMAIN, SERVICE_TURN_ON, {
ATTR_ENTITY_ID: self.state.entity_id,
light.ATTR_HS_COLOR: color
}, blocking=True, context=data.context)
elif 'spectrumHSV' in params['color']:
color = params['color']['spectrumHSV']
saturation = color['saturation'] * 100
brightness = color['value'] * 255
await self.hass.services.async_call(
light.DOMAIN, SERVICE_TURN_ON, {
ATTR_ENTITY_ID: self.state.entity_id,
light.ATTR_HS_COLOR: [color['hue'], saturation],
light.ATTR_BRIGHTNESS: brightness
}, blocking=True, context=data.context)
@register_trait
class SceneTrait(_Trait):
"""Trait to offer scene functionality.
https://developers.google.com/actions/smarthome/traits/scene
"""
name = TRAIT_SCENE
commands = [
COMMAND_ACTIVATE_SCENE
]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain in (scene.DOMAIN, script.DOMAIN)
def sync_attributes(self):
"""Return scene attributes for a sync request."""
# Neither supported domain can support sceneReversible
return {}
def query_attributes(self):
"""Return scene query attributes."""
return {}
async def execute(self, command, data, params, challenge):
"""Execute a scene command."""
# Don't block for scripts as they can be slow.
await self.hass.services.async_call(
self.state.domain, SERVICE_TURN_ON, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=self.state.domain != script.DOMAIN,
context=data.context)
@register_trait
class DockTrait(_Trait):
"""Trait to offer dock functionality.
https://developers.google.com/actions/smarthome/traits/dock
"""
name = TRAIT_DOCK
commands = [
COMMAND_DOCK
]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain == vacuum.DOMAIN
def sync_attributes(self):
"""Return dock attributes for a sync request."""
return {}
def query_attributes(self):
"""Return dock query attributes."""
return {'isDocked': self.state.state == vacuum.STATE_DOCKED}
async def execute(self, command, data, params, challenge):
"""Execute a dock command."""
await self.hass.services.async_call(
self.state.domain, vacuum.SERVICE_RETURN_TO_BASE, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=True, context=data.context)
@register_trait
class StartStopTrait(_Trait):
"""Trait to offer StartStop functionality.
https://developers.google.com/actions/smarthome/traits/startstop
"""
name = TRAIT_STARTSTOP
commands = [
COMMAND_STARTSTOP,
COMMAND_PAUSEUNPAUSE
]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain == vacuum.DOMAIN
def sync_attributes(self):
"""Return StartStop attributes for a sync request."""
return {'pausable':
self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
& vacuum.SUPPORT_PAUSE != 0}
def query_attributes(self):
"""Return StartStop query attributes."""
return {
'isRunning': self.state.state == vacuum.STATE_CLEANING,
'isPaused': self.state.state == vacuum.STATE_PAUSED,
}
async def execute(self, command, data, params, challenge):
"""Execute a StartStop command."""
if command == COMMAND_STARTSTOP:
if params['start']:
await self.hass.services.async_call(
self.state.domain, vacuum.SERVICE_START, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=True, context=data.context)
else:
await self.hass.services.async_call(
self.state.domain, vacuum.SERVICE_STOP, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=True, context=data.context)
elif command == COMMAND_PAUSEUNPAUSE:
if params['pause']:
await self.hass.services.async_call(
self.state.domain, vacuum.SERVICE_PAUSE, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=True, context=data.context)
else:
await self.hass.services.async_call(
self.state.domain, vacuum.SERVICE_START, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=True, context=data.context)
@register_trait
class TemperatureSettingTrait(_Trait):
"""Trait to offer handling both temperature point and modes functionality.
https://developers.google.com/actions/smarthome/traits/temperaturesetting
"""
name = TRAIT_TEMPERATURE_SETTING
commands = [
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
COMMAND_THERMOSTAT_SET_MODE,
]
# We do not support "on" as we are unable to know how to restore
# the last mode.
hass_to_google = {
climate.STATE_HEAT: 'heat',
climate.STATE_COOL: 'cool',
STATE_OFF: 'off',
climate.STATE_AUTO: 'heatcool',
climate.STATE_FAN_ONLY: 'fan-only',
climate.STATE_DRY: 'dry',
climate.STATE_ECO: 'eco'
}
google_to_hass = {value: key for key, value in hass_to_google.items()}
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain != climate.DOMAIN:
return False
return features & climate.SUPPORT_OPERATION_MODE
def sync_attributes(self):
"""Return temperature point and modes attributes for a sync request."""
modes = []
supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
if supported & climate.SUPPORT_ON_OFF != 0:
modes.append(STATE_OFF)
modes.append(STATE_ON)
if supported & climate.SUPPORT_OPERATION_MODE != 0:
for mode in self.state.attributes.get(climate.ATTR_OPERATION_LIST,
[]):
google_mode = self.hass_to_google.get(mode)
if google_mode and google_mode not in modes:
modes.append(google_mode)
return {
'availableThermostatModes': ','.join(modes),
'thermostatTemperatureUnit': _google_temp_unit(
self.hass.config.units.temperature_unit)
}
def query_attributes(self):
"""Return temperature point and modes query attributes."""
attrs = self.state.attributes
response = {}
operation = attrs.get(climate.ATTR_OPERATION_MODE)
supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
if (supported & climate.SUPPORT_ON_OFF
and self.state.state == STATE_OFF):
response['thermostatMode'] = 'off'
elif (supported & climate.SUPPORT_OPERATION_MODE and
operation in self.hass_to_google):
response['thermostatMode'] = self.hass_to_google[operation]
elif supported & climate.SUPPORT_ON_OFF:
response['thermostatMode'] = 'on'
unit = self.hass.config.units.temperature_unit
current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
if current_temp is not None:
response['thermostatTemperatureAmbient'] = \
round(temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1)
current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
if current_humidity is not None:
response['thermostatHumidityAmbient'] = current_humidity
if operation == climate.STATE_AUTO:
if (supported & climate.SUPPORT_TARGET_TEMPERATURE_HIGH and
supported & climate.SUPPORT_TARGET_TEMPERATURE_LOW):
response['thermostatTemperatureSetpointHigh'] = \
round(temp_util.convert(
attrs[climate.ATTR_TARGET_TEMP_HIGH],
unit, TEMP_CELSIUS), 1)
response['thermostatTemperatureSetpointLow'] = \
round(temp_util.convert(
attrs[climate.ATTR_TARGET_TEMP_LOW],
unit, TEMP_CELSIUS), 1)
else:
target_temp = attrs.get(ATTR_TEMPERATURE)
if target_temp is not None:
target_temp = round(
temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1)
response['thermostatTemperatureSetpointHigh'] = target_temp
response['thermostatTemperatureSetpointLow'] = target_temp
else:
target_temp = attrs.get(ATTR_TEMPERATURE)
if target_temp is not None:
response['thermostatTemperatureSetpoint'] = round(
temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1)
return response
async def execute(self, command, data, params, challenge):
"""Execute a temperature point or mode command."""
# All sent in temperatures are always in Celsius
unit = self.hass.config.units.temperature_unit
min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]
if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
temp = temp_util.convert(
params['thermostatTemperatureSetpoint'], TEMP_CELSIUS,
unit)
if unit == TEMP_FAHRENHEIT:
temp = round(temp)
if temp < min_temp or temp > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
"Temperature should be between {} and {}".format(min_temp,
max_temp))
await self.hass.services.async_call(
climate.DOMAIN, climate.SERVICE_SET_TEMPERATURE, {
ATTR_ENTITY_ID: self.state.entity_id,
ATTR_TEMPERATURE: temp
}, blocking=True, context=data.context)
elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
temp_high = temp_util.convert(
params['thermostatTemperatureSetpointHigh'], TEMP_CELSIUS,
unit)
if unit == TEMP_FAHRENHEIT:
temp_high = round(temp_high)
if temp_high < min_temp or temp_high > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
"Upper bound for temperature range should be between "
"{} and {}".format(min_temp, max_temp))
temp_low = temp_util.convert(
params['thermostatTemperatureSetpointLow'], TEMP_CELSIUS,
unit)
if unit == TEMP_FAHRENHEIT:
temp_low = round(temp_low)
if temp_low < min_temp or temp_low > max_temp:
raise SmartHomeError(
ERR_VALUE_OUT_OF_RANGE,
"Lower bound for temperature range should be between "
"{} and {}".format(min_temp, max_temp))
supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
svc_data = {
ATTR_ENTITY_ID: self.state.entity_id,
}
if(supported & climate.SUPPORT_TARGET_TEMPERATURE_HIGH and
supported & climate.SUPPORT_TARGET_TEMPERATURE_LOW):
svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
else:
svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2
await self.hass.services.async_call(
climate.DOMAIN, climate.SERVICE_SET_TEMPERATURE, svc_data,
blocking=True, context=data.context)
elif command == COMMAND_THERMOSTAT_SET_MODE:
target_mode = params['thermostatMode']
supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
if (target_mode in [STATE_ON, STATE_OFF] and
supported & climate.SUPPORT_ON_OFF):
await self.hass.services.async_call(
climate.DOMAIN,
(SERVICE_TURN_ON
if target_mode == STATE_ON
else SERVICE_TURN_OFF),
{ATTR_ENTITY_ID: self.state.entity_id},
blocking=True, context=data.context)
elif supported & climate.SUPPORT_OPERATION_MODE:
await self.hass.services.async_call(
climate.DOMAIN, climate.SERVICE_SET_OPERATION_MODE, {
ATTR_ENTITY_ID: self.state.entity_id,
climate.ATTR_OPERATION_MODE:
self.google_to_hass[target_mode],
}, blocking=True, context=data.context)
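# A minimal illustrative sketch of the setpoint handling above: Google always
# sends setpoints in Celsius, which are converted to the configured unit and
# rounded for Fahrenheit units before the range check. The 45-95 F limits are
# placeholder values for illustration, not constants from this module.
#
#     temp = temp_util.convert(22.5, TEMP_CELSIUS, TEMP_FAHRENHEIT)  # 72.5
#     temp = round(temp)                                             # 72
#     assert 45 <= temp <= 95                                        # within range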
@register_trait
class LockUnlockTrait(_Trait):
"""Trait to lock or unlock a lock.
https://developers.google.com/actions/smarthome/traits/lockunlock
"""
name = TRAIT_LOCKUNLOCK
commands = [
COMMAND_LOCKUNLOCK
]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
return domain == lock.DOMAIN
def sync_attributes(self):
"""Return LockUnlock attributes for a sync request."""
return {}
def query_attributes(self):
"""Return LockUnlock query attributes."""
return {'isLocked': self.state.state == STATE_LOCKED}
async def execute(self, command, data, params, challenge):
"""Execute an LockUnlock command."""
_verify_pin_challenge(data, challenge)
if params['lock']:
service = lock.SERVICE_LOCK
else:
service = lock.SERVICE_UNLOCK
await self.hass.services.async_call(lock.DOMAIN, service, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=True, context=data.context)
@register_trait
class FanSpeedTrait(_Trait):
"""Trait to control speed of Fan.
https://developers.google.com/actions/smarthome/traits/fanspeed
"""
name = TRAIT_FANSPEED
commands = [
COMMAND_FANSPEED
]
speed_synonyms = {
fan.SPEED_OFF: ['stop', 'off'],
fan.SPEED_LOW: ['slow', 'low', 'slowest', 'lowest'],
fan.SPEED_MEDIUM: ['medium', 'mid', 'middle'],
fan.SPEED_HIGH: [
'high', 'max', 'fast', 'highest', 'fastest', 'maximum'
]
}
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain != fan.DOMAIN:
return False
return features & fan.SUPPORT_SET_SPEED
def sync_attributes(self):
"""Return speed point and modes attributes for a sync request."""
modes = self.state.attributes.get(fan.ATTR_SPEED_LIST, [])
speeds = []
for mode in modes:
if mode not in self.speed_synonyms:
continue
speed = {
"speed_name": mode,
"speed_values": [{
"speed_synonym": self.speed_synonyms.get(mode),
"lang": 'en'
}]
}
speeds.append(speed)
return {
'availableFanSpeeds': {
'speeds': speeds,
'ordered': True
},
"reversible": bool(self.state.attributes.get(
ATTR_SUPPORTED_FEATURES, 0) & fan.SUPPORT_DIRECTION)
}
def query_attributes(self):
"""Return speed point and modes query attributes."""
attrs = self.state.attributes
response = {}
speed = attrs.get(fan.ATTR_SPEED)
if speed is not None:
response['on'] = speed != fan.SPEED_OFF
response['online'] = True
response['currentFanSpeedSetting'] = speed
return response
async def execute(self, command, data, params, challenge):
"""Execute an SetFanSpeed command."""
await self.hass.services.async_call(
fan.DOMAIN, fan.SERVICE_SET_SPEED, {
ATTR_ENTITY_ID: self.state.entity_id,
fan.ATTR_SPEED: params['fanSpeed']
}, blocking=True, context=data.context)
@register_trait
class ModesTrait(_Trait):
"""Trait to set modes.
https://developers.google.com/actions/smarthome/traits/modes
"""
name = TRAIT_MODES
commands = [
COMMAND_MODES
]
# Google requires specific mode names and settings. Here is the full list.
# https://developers.google.com/actions/reference/smarthome/traits/modes
# All settings are mapped here as of 2018-11-28 and can be used for other
# entity types.
HA_TO_GOOGLE = {
media_player.ATTR_INPUT_SOURCE: "input source",
}
SUPPORTED_MODE_SETTINGS = {
'xsmall': [
'xsmall', 'extra small', 'min', 'minimum', 'tiny', 'xs'],
'small': ['small', 'half'],
'large': ['large', 'big', 'full'],
'xlarge': ['extra large', 'xlarge', 'xl'],
'Cool': ['cool', 'rapid cool', 'rapid cooling'],
'Heat': ['heat'], 'Low': ['low'],
'Medium': ['medium', 'med', 'mid', 'half'],
'High': ['high'],
'Auto': ['auto', 'automatic'],
'Bake': ['bake'], 'Roast': ['roast'],
'Convection Bake': ['convection bake', 'convect bake'],
'Convection Roast': ['convection roast', 'convect roast'],
'Favorite': ['favorite'],
'Broil': ['broil'],
'Warm': ['warm'],
'Off': ['off'],
'On': ['on'],
'Normal': [
'normal', 'normal mode', 'normal setting', 'standard',
'schedule', 'original', 'default', 'old settings'
],
'None': ['none'],
'Tap Cold': ['tap cold'],
'Cold Warm': ['cold warm'],
'Hot': ['hot'],
'Extra Hot': ['extra hot'],
'Eco': ['eco'],
'Wool': ['wool', 'fleece'],
'Turbo': ['turbo'],
'Rinse': ['rinse', 'rinsing', 'rinse wash'],
'Away': ['away', 'holiday'],
'maximum': ['maximum'],
'media player': ['media player'],
'chromecast': ['chromecast'],
'tv': [
'tv', 'television', 'tv position', 'television position',
'watching tv', 'watching tv position', 'entertainment',
'entertainment position'
],
'am fm': ['am fm', 'am radio', 'fm radio'],
'internet radio': ['internet radio'],
'satellite': ['satellite'],
'game console': ['game console'],
'antifrost': ['antifrost', 'anti-frost'],
'boost': ['boost'],
'Clock': ['clock'],
'Message': ['message'],
'Messages': ['messages'],
'News': ['news'],
'Disco': ['disco'],
'antifreeze': ['antifreeze', 'anti-freeze', 'anti freeze'],
'balanced': ['balanced', 'normal'],
'swing': ['swing'],
'media': ['media', 'media mode'],
'panic': ['panic'],
'ring': ['ring'],
'frozen': ['frozen', 'rapid frozen', 'rapid freeze'],
'cotton': ['cotton', 'cottons'],
'blend': ['blend', 'mix'],
'baby wash': ['baby wash'],
'synthetics': ['synthetic', 'synthetics', 'compose'],
'hygiene': ['hygiene', 'sterilization'],
'smart': ['smart', 'intelligent', 'intelligence'],
'comfortable': ['comfortable', 'comfort'],
'manual': ['manual'],
'energy saving': ['energy saving'],
'sleep': ['sleep'],
'quick wash': ['quick wash', 'fast wash'],
'cold': ['cold'],
'airsupply': ['airsupply', 'air supply'],
        'dehumidification': ['dehumidification', 'dehumidify'],
'game': ['game', 'game mode']
}
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain != media_player.DOMAIN:
return False
return features & media_player.SUPPORT_SELECT_SOURCE
def sync_attributes(self):
"""Return mode attributes for a sync request."""
sources_list = self.state.attributes.get(
media_player.ATTR_INPUT_SOURCE_LIST, [])
modes = []
sources = {}
if sources_list:
sources = {
"name": self.HA_TO_GOOGLE.get(media_player.ATTR_INPUT_SOURCE),
"name_values": [{
"name_synonym": ['input source'],
"lang": "en"
}],
"settings": [],
"ordered": False
}
for source in sources_list:
if source in self.SUPPORTED_MODE_SETTINGS:
src = source
synonyms = self.SUPPORTED_MODE_SETTINGS.get(src)
elif source.lower() in self.SUPPORTED_MODE_SETTINGS:
src = source.lower()
synonyms = self.SUPPORTED_MODE_SETTINGS.get(src)
else:
continue
sources['settings'].append(
{
"setting_name": src,
"setting_values": [{
"setting_synonym": synonyms,
"lang": "en"
}]
}
)
if sources:
modes.append(sources)
payload = {'availableModes': modes}
return payload
def query_attributes(self):
"""Return current modes."""
attrs = self.state.attributes
response = {}
mode_settings = {}
if attrs.get(media_player.ATTR_INPUT_SOURCE_LIST):
mode_settings.update({
media_player.ATTR_INPUT_SOURCE: attrs.get(
media_player.ATTR_INPUT_SOURCE)
})
if mode_settings:
response['on'] = self.state.state != STATE_OFF
response['online'] = True
response['currentModeSettings'] = mode_settings
return response
async def execute(self, command, data, params, challenge):
"""Execute an SetModes command."""
settings = params.get('updateModeSettings')
requested_source = settings.get(
self.HA_TO_GOOGLE.get(media_player.ATTR_INPUT_SOURCE))
if requested_source:
for src in self.state.attributes.get(
media_player.ATTR_INPUT_SOURCE_LIST):
if src.lower() == requested_source.lower():
source = src
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_SELECT_SOURCE, {
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_INPUT_SOURCE: source
}, blocking=True, context=data.context)
@register_trait
class OpenCloseTrait(_Trait):
"""Trait to open and close a cover.
https://developers.google.com/actions/smarthome/traits/openclose
"""
name = TRAIT_OPENCLOSE
commands = [
COMMAND_OPENCLOSE
]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == cover.DOMAIN:
return True
return domain == binary_sensor.DOMAIN and device_class in (
binary_sensor.DEVICE_CLASS_DOOR,
binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
binary_sensor.DEVICE_CLASS_LOCK,
binary_sensor.DEVICE_CLASS_OPENING,
binary_sensor.DEVICE_CLASS_WINDOW,
)
def sync_attributes(self):
"""Return opening direction."""
attrs = {}
if self.state.domain == binary_sensor.DOMAIN:
attrs['queryOnlyOpenClose'] = True
return attrs
def query_attributes(self):
"""Return state query attributes."""
domain = self.state.domain
response = {}
if domain == cover.DOMAIN:
            # Querying an assumed or unknown state is not supported: reporting
            # a guessed position could make Google skip an open/close command
            # when that guess is wrong, so raise ERR_NOT_SUPPORTED instead.
if self.state.attributes.get(ATTR_ASSUMED_STATE):
raise SmartHomeError(
ERR_NOT_SUPPORTED,
'Querying state is not supported')
if self.state.state == STATE_UNKNOWN:
raise SmartHomeError(
ERR_NOT_SUPPORTED,
'Querying state is not supported')
position = self.state.attributes.get(
cover.ATTR_CURRENT_POSITION
)
if position is not None:
response['openPercent'] = position
elif self.state.state != cover.STATE_CLOSED:
response['openPercent'] = 100
else:
response['openPercent'] = 0
elif domain == binary_sensor.DOMAIN:
if self.state.state == STATE_ON:
response['openPercent'] = 100
else:
response['openPercent'] = 0
return response
async def execute(self, command, data, params, challenge):
"""Execute an Open, close, Set position command."""
domain = self.state.domain
if domain == cover.DOMAIN:
if self.state.attributes.get(ATTR_DEVICE_CLASS) in (
cover.DEVICE_CLASS_DOOR, cover.DEVICE_CLASS_GARAGE
):
_verify_pin_challenge(data, challenge)
position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)
if params['openPercent'] == 0:
await self.hass.services.async_call(
cover.DOMAIN, cover.SERVICE_CLOSE_COVER, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=True, context=data.context)
elif params['openPercent'] == 100:
await self.hass.services.async_call(
cover.DOMAIN, cover.SERVICE_OPEN_COVER, {
ATTR_ENTITY_ID: self.state.entity_id
}, blocking=True, context=data.context)
elif position is not None:
await self.hass.services.async_call(
cover.DOMAIN, cover.SERVICE_SET_COVER_POSITION, {
ATTR_ENTITY_ID: self.state.entity_id,
cover.ATTR_POSITION: params['openPercent']
}, blocking=True, context=data.context)
else:
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED,
'Setting a position is not supported')
@register_trait
class VolumeTrait(_Trait):
"""Trait to control brightness of a device.
https://developers.google.com/actions/smarthome/traits/volume
"""
name = TRAIT_VOLUME
commands = [
COMMAND_SET_VOLUME,
COMMAND_VOLUME_RELATIVE,
]
@staticmethod
def supported(domain, features, device_class):
"""Test if state is supported."""
if domain == media_player.DOMAIN:
return features & media_player.SUPPORT_VOLUME_SET
return False
def sync_attributes(self):
"""Return brightness attributes for a sync request."""
return {}
def query_attributes(self):
"""Return brightness query attributes."""
response = {}
level = self.state.attributes.get(
media_player.ATTR_MEDIA_VOLUME_LEVEL)
muted = self.state.attributes.get(
media_player.ATTR_MEDIA_VOLUME_MUTED)
if level is not None:
# Convert 0.0-1.0 to 0-100
response['currentVolume'] = int(level * 100)
response['isMuted'] = bool(muted)
return response
async def _execute_set_volume(self, data, params):
level = params['volumeLevel']
await self.hass.services.async_call(
media_player.DOMAIN,
media_player.SERVICE_VOLUME_SET, {
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_MEDIA_VOLUME_LEVEL:
level / 100
}, blocking=True, context=data.context)
async def _execute_volume_relative(self, data, params):
# This could also support up/down commands using relativeSteps
relative = params['volumeRelativeLevel']
current = self.state.attributes.get(
media_player.ATTR_MEDIA_VOLUME_LEVEL)
await self.hass.services.async_call(
media_player.DOMAIN, media_player.SERVICE_VOLUME_SET, {
ATTR_ENTITY_ID: self.state.entity_id,
media_player.ATTR_MEDIA_VOLUME_LEVEL:
current + relative / 100
}, blocking=True, context=data.context)
async def execute(self, command, data, params, challenge):
"""Execute a brightness command."""
if command == COMMAND_SET_VOLUME:
await self._execute_set_volume(data, params)
elif command == COMMAND_VOLUME_RELATIVE:
await self._execute_volume_relative(data, params)
else:
raise SmartHomeError(
ERR_NOT_SUPPORTED, 'Command not supported')
def _verify_pin_challenge(data, challenge):
"""Verify a pin challenge."""
if not data.config.secure_devices_pin:
raise SmartHomeError(
ERR_CHALLENGE_NOT_SETUP, 'Challenge is not set up')
if not challenge:
raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)
pin = challenge.get('pin')
if pin != data.config.secure_devices_pin:
raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, challenge):
"""Verify a pin challenge."""
if not challenge or not challenge.get('ack'):
raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
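# A minimal illustrative sketch of the pin-challenge flow implemented by
# _verify_pin_challenge above. The SimpleNamespace stand-in for the request
# `data` object and the '1234' pin are assumptions made only for illustration.
def _example_pin_challenge_flow():
    """Illustrative only: show the three outcomes of _verify_pin_challenge."""
    from types import SimpleNamespace
    data = SimpleNamespace(config=SimpleNamespace(secure_devices_pin='1234'))
    for challenge in (None, {'pin': '0000'}, {'pin': '1234'}):
        try:
            _verify_pin_challenge(data, challenge)
            print('challenge %r accepted' % (challenge,))
        except ChallengeNeeded as err:
            # None -> pin needed; wrong pin -> failed-pin challenge needed.
            print('challenge %r rejected: %s' % (challenge, err))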
| apache-2.0 | -8,771,102,119,974,407,000 | 33.895473 | 79 | 0.573329 | false |
konradcybulski/GameTheory1041 | Python_Resources/InstanceVariables.py | 1 | 1919 | """
@author Konrad Cybulski
@since 14/09/2016
@modified 14/09/2016
"""
import numpy as np
class InstanceVariables:
def __init__(self, runs, generations, population_size, mutation_rate,
execution_error, reputation_assignment_error,
private_assessment_error, reputation_update_rate,
socialnorm, cost, benefit):
self.runs = runs
self.generations = generations
self.population_size = population_size
self.mutation_rate = mutation_rate
self.execution_error = execution_error
self.reputation_assignment_error = reputation_assignment_error
self.assessment_error = private_assessment_error
self.reputation_update_rate = reputation_update_rate
self.socialnorm = np.array(socialnorm) # matrix determining the reputation dynamic with
# regard to the action taken and the reputation
# of the other agent
        self.cost = cost  # cost entry of the payoff matrix
        self.benefit = benefit  # benefit entry of the payoff matrix
# Population and reputation arrays
self.population = np.zeros(population_size, dtype=int) # vector of all individual strategies
# population[k] : strategy of individual k
# population[k] = 0, 1, 2 or 3
self.reputation = np.zeros(population_size, dtype=int) # vector of all individual public reputations
# reputation[k] : public reputation of individual k
# reputation[k] = 0 or 1
# Cooperation Tracking
self.coop_index_sum = float(0)
self.interaction_count = float(0)
self.track_cooperation = False
def increment_coop_index(self, coop_index):
self.coop_index_sum += float(coop_index)
self.interaction_count += 1.0
def get_average_coop_index(self):
return float(self.coop_index_sum)/float(self.interaction_count)
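# A minimal illustrative sketch of constructing the container and using the
# cooperation-index helpers. Every parameter value below is a placeholder
# chosen for illustration, not a value from the original experiments.
if __name__ == "__main__":
    variables = InstanceVariables(
        runs=1, generations=10, population_size=12, mutation_rate=0.001,
        execution_error=0.08, reputation_assignment_error=0.01,
        private_assessment_error=0.01, reputation_update_rate=1.0,
        socialnorm=[[1, 0], [0, 1]], cost=1, benefit=5)
    variables.increment_coop_index(0.5)
    variables.increment_coop_index(1.0)
    print(variables.get_average_coop_index())  # 0.75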
| gpl-3.0 | 5,178,484,987,363,511,000 | 41.644444 | 109 | 0.665451 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/_operator.py | 1 | 11985 | # encoding: utf-8
# module _operator
# from (built-in)
# by generator 1.135
"""
Operator interface.
This module exports a set of functions implemented in C corresponding
to the intrinsic operators of Python. For example, operator.add(x, y)
is equivalent to the expression x+y. The function names are those
used for special methods; variants without leading and trailing
'__' are also provided for convenience.
"""
# no imports
# functions
def abs(a): # real signature unknown; restored from __doc__
""" abs(a) -- Same as abs(a). """
pass
def add(a, b): # real signature unknown; restored from __doc__
""" add(a, b) -- Same as a + b. """
pass
def and_(a, b): # real signature unknown; restored from __doc__
""" and_(a, b) -- Same as a & b. """
pass
def concat(a, b): # real signature unknown; restored from __doc__
""" concat(a, b) -- Same as a + b, for a and b sequences. """
pass
def contains(a, b): # real signature unknown; restored from __doc__
""" contains(a, b) -- Same as b in a (note reversed operands). """
pass
def countOf(a, b): # real signature unknown; restored from __doc__
""" countOf(a, b) -- Return the number of times b occurs in a. """
pass
def delitem(a, b): # real signature unknown; restored from __doc__
""" delitem(a, b) -- Same as del a[b]. """
pass
def eq(a, b): # real signature unknown; restored from __doc__
""" eq(a, b) -- Same as a==b. """
pass
def floordiv(a, b): # real signature unknown; restored from __doc__
""" floordiv(a, b) -- Same as a // b. """
pass
def ge(a, b): # real signature unknown; restored from __doc__
""" ge(a, b) -- Same as a>=b. """
pass
def getitem(a, b): # real signature unknown; restored from __doc__
""" getitem(a, b) -- Same as a[b]. """
pass
def gt(a, b): # real signature unknown; restored from __doc__
""" gt(a, b) -- Same as a>b. """
pass
def iadd(a, b): # real signature unknown; restored from __doc__
""" a = iadd(a, b) -- Same as a += b. """
pass
def iand(a, b): # real signature unknown; restored from __doc__
""" a = iand(a, b) -- Same as a &= b. """
pass
def iconcat(a, b): # real signature unknown; restored from __doc__
""" a = iconcat(a, b) -- Same as a += b, for a and b sequences. """
pass
def ifloordiv(a, b): # real signature unknown; restored from __doc__
""" a = ifloordiv(a, b) -- Same as a //= b. """
pass
def ilshift(a, b): # real signature unknown; restored from __doc__
""" a = ilshift(a, b) -- Same as a <<= b. """
pass
def imod(a, b): # real signature unknown; restored from __doc__
""" a = imod(a, b) -- Same as a %= b. """
pass
def imul(a, b): # real signature unknown; restored from __doc__
""" a = imul(a, b) -- Same as a *= b. """
pass
def index(a): # real signature unknown; restored from __doc__
""" index(a) -- Same as a.__index__() """
pass
def indexOf(a, b): # real signature unknown; restored from __doc__
""" indexOf(a, b) -- Return the first index of b in a. """
pass
def inv(a): # real signature unknown; restored from __doc__
""" inv(a) -- Same as ~a. """
pass
def invert(a): # real signature unknown; restored from __doc__
""" invert(a) -- Same as ~a. """
pass
def ior(a, b): # real signature unknown; restored from __doc__
""" a = ior(a, b) -- Same as a |= b. """
pass
def ipow(a, b): # real signature unknown; restored from __doc__
""" a = ipow(a, b) -- Same as a **= b. """
pass
def irshift(a, b): # real signature unknown; restored from __doc__
""" a = irshift(a, b) -- Same as a >>= b. """
pass
def isub(a, b): # real signature unknown; restored from __doc__
""" a = isub(a, b) -- Same as a -= b. """
pass
def is_(a, b): # real signature unknown; restored from __doc__
""" is_(a, b) -- Same as a is b. """
pass
def is_not(a, b): # real signature unknown; restored from __doc__
""" is_not(a, b) -- Same as a is not b. """
pass
def itruediv(a, b): # real signature unknown; restored from __doc__
""" a = itruediv(a, b) -- Same as a /= b """
pass
def ixor(a, b): # real signature unknown; restored from __doc__
""" a = ixor(a, b) -- Same as a ^= b. """
pass
def le(a, b): # real signature unknown; restored from __doc__
""" le(a, b) -- Same as a<=b. """
pass
def length_hint(obj, default=0): # real signature unknown; restored from __doc__
"""
length_hint(obj, default=0) -> int
Return an estimate of the number of items in obj.
This is useful for presizing containers when building from an
iterable.
If the object supports len(), the result will be
exact. Otherwise, it may over- or under-estimate by an
arbitrary amount. The result will be an integer >= 0.
"""
return 0
def lshift(a, b): # real signature unknown; restored from __doc__
""" lshift(a, b) -- Same as a << b. """
pass
def lt(a, b): # real signature unknown; restored from __doc__
""" lt(a, b) -- Same as a<b. """
pass
def mod(a, b): # real signature unknown; restored from __doc__
""" mod(a, b) -- Same as a % b. """
pass
def mul(a, b): # real signature unknown; restored from __doc__
""" mul(a, b) -- Same as a * b. """
pass
def ne(a, b): # real signature unknown; restored from __doc__
""" ne(a, b) -- Same as a!=b. """
pass
def neg(a): # real signature unknown; restored from __doc__
""" neg(a) -- Same as -a. """
pass
def not_(a): # real signature unknown; restored from __doc__
""" not_(a) -- Same as not a. """
pass
def or_(a, b): # real signature unknown; restored from __doc__
""" or_(a, b) -- Same as a | b. """
pass
def pos(a): # real signature unknown; restored from __doc__
""" pos(a) -- Same as +a. """
pass
def pow(a, b): # real signature unknown; restored from __doc__
""" pow(a, b) -- Same as a ** b. """
pass
def rshift(a, b): # real signature unknown; restored from __doc__
""" rshift(a, b) -- Same as a >> b. """
pass
def setitem(a, b, c): # real signature unknown; restored from __doc__
""" setitem(a, b, c) -- Same as a[b] = c. """
pass
def sub(a, b): # real signature unknown; restored from __doc__
""" sub(a, b) -- Same as a - b. """
pass
def truediv(a, b): # real signature unknown; restored from __doc__
""" truediv(a, b) -- Same as a / b. """
pass
def truth(a): # real signature unknown; restored from __doc__
""" truth(a) -- Return True if a is true, False otherwise. """
pass
def xor(a, b): # real signature unknown; restored from __doc__
""" xor(a, b) -- Same as a ^ b. """
pass
def _compare_digest(*args, **kwargs): # real signature unknown
"""
compare_digest(a, b) -> bool
Return 'a == b'. This function uses an approach designed to prevent
timing analysis, making it appropriate for cryptography.
a and b must both be of the same type: either str (ASCII only),
or any type that supports the buffer protocol (e.g. bytes).
Note: If a and b are of different lengths, or if an error occurs,
a timing attack could theoretically reveal information about the
types and lengths of a and b--but not their values.
"""
pass
# classes
from .object import object
class attrgetter(object):
"""
attrgetter(attr, ...) --> attrgetter object
Return a callable object that fetches the given attribute(s) from its operand.
After f = attrgetter('name'), the call f(r) returns r.name.
After g = attrgetter('name', 'date'), the call g(r) returns (r.name, r.date).
After h = attrgetter('name.first', 'name.last'), the call h(r) returns
(r.name.first, r.name.last).
"""
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, attr, *more): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
from .object import object
class itemgetter(object):
"""
itemgetter(item, ...) --> itemgetter object
Return a callable object that fetches the given item(s) from its operand.
After f = itemgetter(2), the call f(r) returns r[2].
After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3])
"""
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, item, *more): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
from .object import object
class methodcaller(object):
"""
methodcaller(name, ...) --> methodcaller object
Return a callable object that calls the given method on its operand.
After f = methodcaller('name'), the call f(r) returns r.name().
After g = methodcaller('name', 'date', foo=1), the call g(r) returns
r.name('date', foo=1).
"""
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, name, *more): # real signature unknown; restored from __doc__
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
from .object import object
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
""" Load a built-in module. """
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
# variables with complex values
__spec__ = None # (!) real value is ''
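# The callables documented above mirror the standard-library `operator` module;
# a small illustration (run against the real module, not this generated stub):
#
#     import operator
#     sorted([('b', 2), ('a', 1)], key=operator.itemgetter(0))  # [('a', 1), ('b', 2)]
#     operator.methodcaller('upper')('abc')                     # 'ABC'
#     operator.attrgetter('imag')(3 + 4j)                       # 4.0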
| gpl-2.0 | -966,062,011,599,680,600 | 29.809769 | 101 | 0.585982 | false |
ZhangAustin/deepy | deepy/layers/recurrent.py | 3 | 6446 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import NeuralLayer
from deepy.utils import build_activation, FLOATX
import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict
OUTPUT_TYPES = ["sequence", "one"]
INPUT_TYPES = ["sequence", "one"]
class RNN(NeuralLayer):
"""
Recurrent neural network layer.
"""
def __init__(self, hidden_size, input_type="sequence", output_type="sequence", vector_core=None,
hidden_activation="tanh", hidden_init=None, input_init=None, steps=None,
persistent_state=False, reset_state_for_input=None, batch_size=None,
go_backwards=False, mask=None, second_input_size=None, second_input=None):
super(RNN, self).__init__("rnn")
self._hidden_size = hidden_size
self.output_dim = self._hidden_size
self._input_type = input_type
self._output_type = output_type
self._hidden_activation = hidden_activation
self._hidden_init = hidden_init
self._vector_core = vector_core
self._input_init = input_init
self.persistent_state = persistent_state
self.reset_state_for_input = reset_state_for_input
self.batch_size = batch_size
self._steps = steps
self._go_backwards = go_backwards
self._mask = mask.dimshuffle((1,0)) if mask else None
self._second_input_size = second_input_size
self._second_input = second_input
self._sequence_map = OrderedDict()
if input_type not in INPUT_TYPES:
raise Exception("Input type of RNN is wrong: %s" % input_type)
if output_type not in OUTPUT_TYPES:
raise Exception("Output type of RNN is wrong: %s" % output_type)
if self.persistent_state and not self.batch_size:
raise Exception("Batch size must be set for persistent state mode")
if mask and input_type == "one":
raise Exception("Mask only works with sequence input")
def _hidden_preact(self, h):
return T.dot(h, self.W_h) if not self._vector_core else h * self.W_h
def step(self, *vars):
# Parse sequence
sequence_map = dict(zip(self._sequence_map.keys(), vars[:len(self._sequence_map)]))
if self._input_type == "sequence":
x = sequence_map["x"]
h = vars[-1]
# Reset part of the state on condition
if self.reset_state_for_input != None:
h = h * T.neq(x[:, self.reset_state_for_input], 1).dimshuffle(0, 'x')
# RNN core step
z = x + self._hidden_preact(h) + self.B_h
else:
h = vars[-1]
z = self._hidden_preact(h) + self.B_h
# Second input
if "second_input" in sequence_map:
z += sequence_map["second_input"]
new_h = self._hidden_act(z)
# Apply mask
if "mask" in sequence_map:
mask = sequence_map["mask"].dimshuffle(0, 'x')
new_h = mask * new_h + (1 - mask) * h
return new_h
def produce_input_sequences(self, x, mask=None, second_input=None):
self._sequence_map.clear()
if self._input_type == "sequence":
self._sequence_map["x"] = T.dot(x, self.W_i)
# Mask
if mask:
# (batch)
self._sequence_map["mask"] = mask
elif self._mask:
# (time, batch)
self._sequence_map["mask"] = self._mask
# Second input
if second_input:
self._sequence_map["second_input"] = T.dot(second_input, self.W_i2)
elif self._second_input:
self._sequence_map["second_input"] = T.dot(self._second_input, self.W_i2)
return self._sequence_map.values()
def produce_initial_states(self, x):
h0 = T.alloc(np.cast[FLOATX](0.), x.shape[0], self._hidden_size)
if self._input_type == "sequence":
if self.persistent_state:
h0 = self.state
else:
h0 = x
return [h0]
def output(self, x):
if self._input_type == "sequence":
# Move middle dimension to left-most position
# (sequence, batch, value)
sequences = self.produce_input_sequences(x.dimshuffle((1,0,2)))
else:
sequences = self.produce_input_sequences(None)
step_outputs = self.produce_initial_states(x)
hiddens, _ = theano.scan(self.step, sequences=sequences, outputs_info=step_outputs,
n_steps=self._steps, go_backwards=self._go_backwards)
# Save persistent state
if self.persistent_state:
self.register_updates((self.state, hiddens[-1]))
if self._output_type == "one":
return hiddens[-1]
elif self._output_type == "sequence":
return hiddens.dimshuffle((1,0,2))
def setup(self):
if self._input_type == "one" and self.input_dim != self._hidden_size:
raise Exception("For RNN receives one vector as input, "
"the hidden size should be same as last output dimension.")
self._setup_params()
self._setup_functions()
def _setup_functions(self):
self._hidden_act = build_activation(self._hidden_activation)
def _setup_params(self):
if not self._vector_core:
self.W_h = self.create_weight(self._hidden_size, self._hidden_size, suffix="h", initializer=self._hidden_init)
else:
self.W_h = self.create_bias(self._hidden_size, suffix="h")
self.W_h.set_value(self.W_h.get_value() + self._vector_core)
self.B_h = self.create_bias(self._hidden_size, suffix="h")
self.register_parameters(self.W_h, self.B_h)
if self.persistent_state:
self.state = self.create_matrix(self.batch_size, self._hidden_size, "rnn_state")
self.register_free_parameters(self.state)
else:
self.state = None
if self._input_type == "sequence":
self.W_i = self.create_weight(self.input_dim, self._hidden_size, suffix="i", initializer=self._input_init)
self.register_parameters(self.W_i)
if self._second_input_size:
self.W_i2 = self.create_weight(self._second_input_size, self._hidden_size, suffix="i2", initializer=self._input_init)
self.register_parameters(self.W_i2)
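# A minimal illustrative sketch of the mask handling in RNN.step above, which
# keeps the previous hidden state for padded timesteps via
# new_h = mask * candidate + (1 - mask) * previous. Shapes and values here are
# placeholders chosen for clarity only.
def _example_mask_update():
    """Illustrative only: a zero mask entry freezes the hidden state."""
    prev_h = np.array([[0.1, 0.2], [0.3, 0.4]])     # (batch=2, hidden=2)
    candidate = np.array([[0.9, 0.9], [0.9, 0.9]])  # proposed new state
    mask = np.array([[1.0], [0.0]])                 # second sequence is padding
    return mask * candidate + (1 - mask) * prev_h   # row 1 stays [0.3, 0.4]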
| mit | 2,185,468,890,153,826,000 | 40.057325 | 129 | 0.581601 | false |
ChristosChristofidis/twitter_nlp | python/chunk_tagger_stdin.py | 9 | 2094 | #!/usr/bin/python
import sys
import os
import re
import subprocess
import time
from signal import *
#BASE_DIR = '/home/aritter/twitter_nlp'
#BASE_DIR = os.environ['HOME'] + '/twitter_nlp'
#BASE_DIR = '/homes/gws/aritter/twitter_nlp'
BASE_DIR = 'twitter_nlp.jar'
if os.environ.has_key('TWITTER_NLP'):
BASE_DIR = os.environ['TWITTER_NLP']
sys.path.append('%s/python' % (BASE_DIR))
import chunking_features
_MODEL_FP = ('%s/models/chunk/200Kptb_14Ktwit.model' % (BASE_DIR))
_CLUSTERS = '%s/data/brown_clusters/60K_clusters.txt' % (BASE_DIR)
class ChunkTagger:
def __init__(self):
self.GetTagger()
self.nTagged = 0
def GetTagger(self):
self.tagger = subprocess.Popen('java -Xmx400m -cp %s/mallet-2.0.6/lib/mallet-deps.jar:%s/mallet-2.0.6/class cc.mallet.fst.SimpleTaggerStdin --model-file %s' % (BASE_DIR, BASE_DIR, _MODEL_FP),
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
def TagSentence(self, word_pos):
#if self.nTagged % 1000 == 0:
if self.nTagged % 500 == 0:
self.tagger.stdin.close()
self.tagger.stdout.close()
#self.tagger.kill()
os.kill(self.tagger.pid, SIGTERM) #Need to do this for python 2.4
self.tagger.wait()
self.GetTagger()
feat_list = []
for i in range(len(word_pos)):
features = chunking_features.nltk_features(word_pos, i)
features.extend(chunking_features.turian_features(word_pos, i))
feat_list.append(features)
# Create string to feed into Mallet
feat_list_str = []
for word_feats in feat_list:
feat_list_str.append(' '.join(word_feats))
self.tagger.stdin.write(("\t".join(feat_list_str) + "\n").encode('utf8'))
chunks = []
for i in range(len(feat_list)):
chunks.append(self.tagger.stdout.readline().rstrip('\n').strip(' '))
self.nTagged += 1
return chunks
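# A minimal illustrative usage sketch, assuming the Mallet classes, chunk model
# and Brown clusters are installed under BASE_DIR as configured above. The
# example sentence and its POS tags are placeholders for illustration; input is
# a list of (word, POS-tag) pairs.
if __name__ == "__main__":
    tagger = ChunkTagger()
    word_pos = [('I', 'PRP'), ('love', 'VBP'), ('New', 'NNP'), ('York', 'NNP')]
    print(tagger.TagSentence(word_pos))  # one chunk tag per token, e.g. B-NP ...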
| gpl-3.0 | 879,652,631,219,626,800 | 31.215385 | 199 | 0.579274 | false |
DennyZhang/devops_public | python/java_tool/java_analyze.py | 3 | 6390 | #!/usr/bin/python
##-------------------------------------------------------------------
## File : java_analyze.py
## Description :
## --
## Created : <2017-01-25>
## Updated: Time-stamp: <2017-07-18 10:18:59>
##-------------------------------------------------------------------
import sys, os
import argparse
import requests, json
################################################################################
# Common functions
def analyze_gc_logfile(gc_logfile, apikey):
print("Call rest api to parse gc log: http://www.gceasy.io.")
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
url = "http://api.gceasy.io/analyzeGC?apiKey=%s" % (apikey)
res = requests.post(url, data=open(gc_logfile, "r"), headers=headers)
if res.status_code != 200:
print("ERROR: http response is not 200 OK. status_code: %d. content: %s..." \
% (res.status_code, res.content[0:40]))
return False
content = res.content
l = json.loads(content)
print("graphURL: %s" % (l["graphURL"]))
if '"isProblem":true' in content:
print("ERROR: problem is found.")
return False
return True
def analyze_jstack_logfile(jstack_logfile, apikey, min_runnable_percentage):
print("Call rest api to parse java jstack log: http://www.fastthread.io.")
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
url = "http://api.fastthread.io/fastthread-api?apiKey=%s" % (apikey)
res = requests.post(url, data=open(jstack_logfile, "r"), headers=headers)
if res.status_code != 200:
print("ERROR: http response is not 200 OK. status_code: %d. content: %s..." \
% (res.status_code, res.content[0:40]))
return False
content = res.content
l = json.loads(content)
threadstate = l["threadDumpReport"][0]["threadState"]
threadcount_runnable = int(threadstate[0]["threadCount"])
threadcount_waiting = int(threadstate[1]["threadCount"])
threadcount_timed_waiting = int(threadstate[2]["threadCount"])
threadcount_blocked = int(threadstate[3]["threadCount"])
print("%d threads in RUNNABLE, %d in WAITING, %d in TIMED_WAITING, %d in BLOCKED." \
% (threadcount_runnable, threadcount_waiting, threadcount_timed_waiting, threadcount_blocked))
print("graphURL: %s" % (l["graphURL"]))
threadcount_total = threadcount_runnable + threadcount_waiting + \
threadcount_timed_waiting + threadcount_blocked
if (float(threadcount_runnable)/threadcount_total) < min_runnable_percentage:
print("ERROR: only %s threads are in RUNNABLE state. Less than %s." % \
("{0:.2f}%".format(float(threadcount_runnable)*100/threadcount_total), \
"{0:.2f}%".format(min_runnable_percentage*100)))
return False
return True
################################################################################
## Generate gc log: start java program with -Xloggc enabled
## Generate java jstack log: jstack -l $java_pid
##
## Sample: Run with environment variables.
##
## # analyze gc logfile:
## export JAVA_ANALYZE_ACTION="analyze_gc_logfile"
## export JAVA_ANALYZE_LOGFILE="/tmp/gc.log"
## export JAVA_ANALYZE_APIKEY="29792f0d-5d5f-43ad-9358..."
## curl -L https://raw.githubusercontent.com/.../java_analyze.py | bash
##
## # analyze jstack logfile:
## export JAVA_ANALYZE_ACTION="analyze_jstack_logfile"
## export JAVA_ANALYZE_LOGFILE="/tmp/jstack.log"
## export JAVA_ANALYZE_APIKEY="29792f0d-5d5f-43ad-9358..."
## curl -L https://raw.githubusercontent.com/.../java_analyze.py | bash
##
## Sample: Run with argument parameters.
## python ./java_analyze.py --action analyze_gc_logfile \\
## --logfile /tmp/gc.log --apikey "29792f0d..."
## python ./java_analyze.py --action analyze_jstack_logfile \
## --logfile /tmp/jstack.log --apikey "29792f0d..."
##
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--action', default='', required=False, \
help="Supported action: analyze_gc_logfile or analyze_jstack_logfile", \
type=str)
parser.add_argument('--logfile', default='', required=False, \
help="Critical log file to parse", type=str)
parser.add_argument('--apikey', default='', required=False, \
help="API key to call gceasy.io and fastthread.io", \
type=str)
parser.add_argument('--minrunnable', default=0.40, required=False, \
help="If too many threads are not in RUNNABLE state, we raise alerts", \
type=float)
l = parser.parse_args()
action = l.action
logfile = l.logfile
apikey = l.apikey
# Get parameters via environment variables, if missing
if action == "" or action is None:
action = os.environ.get('JAVA_ANALYZE_ACTION')
if logfile == "" or logfile is None:
logfile = os.environ.get('JAVA_ANALYZE_LOGFILE')
if apikey == "" or apikey is None:
apikey = os.environ.get('JAVA_ANALYZE_APIKEY')
# input parameters check
if action == "" or action is None:
print("ERROR: mandatory parameter of action is not given.")
sys.exit(1)
if logfile == "" or logfile is None:
print("ERROR: mandatory parameter of logfile is not given.")
sys.exit(1)
if apikey == "" or apikey is None:
print("ERROR: mandatory parameter of apikey is not given.")
sys.exit(1)
############################################################################
# main logic
if action == "analyze_gc_logfile":
if analyze_gc_logfile(logfile, apikey) is False:
print("ERROR: problems are detected in gc log(%s)." % (logfile))
sys.exit(1)
else:
print("OK: no problem found when parsing gc log(%s)." % (logfile))
elif action == "analyze_jstack_logfile":
if analyze_jstack_logfile(logfile, apikey, l.minrunnable) is False:
print("ERROR: problems are detected in jstack log(%s)." % (logfile))
sys.exit(1)
else:
print("OK: no problem found when parsing jstack log(%s)." % (logfile))
else:
print("ERROR: not supported action(%s)." % (action))
sys.exit(1)
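# Besides the CLI / environment-variable usage documented above, the two helper
# functions can also be called directly; the log path and API key below are
# placeholders for illustration.
#
#     if analyze_gc_logfile("/tmp/gc.log", "29792f0d-...") is False:
#         sys.exit(1)
#     if analyze_jstack_logfile("/tmp/jstack.log", "29792f0d-...", 0.40) is False:
#         sys.exit(1)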
## File : java_analyze.py ends
| mit | -6,139,812,882,418,273,000 | 42.175676 | 104 | 0.586228 | false |
tcpcloud/openvstorage | ovs/extensions/storage/volatile/dummystore.py | 1 | 3090 | # Copyright 2014 Open vStorage NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dummy volatile module
"""
import time
import json
class DummyVolatileStore(object):
"""
This is a dummy volatile store that makes use of a local json file
"""
_path = '/run/dummyvolatile.json'
_storage = {}
_timeout = {}
@staticmethod
def clean():
"""
Empties the store
"""
import os
try:
os.remove(DummyVolatileStore._path)
except OSError:
pass
def _read(self):
"""
Reads the local json file
"""
try:
f = open(self._path, 'r')
data = json.loads(f.read())
f.close()
except IOError:
data = {'t': {}, 's': {}}
return data
def get(self, key, default=None):
"""
Retrieves a certain value for a given key
"""
data = self._read()
if key in data['t'] and data['t'][key] > time.time():
value = data['s'].get(key)
if 'ovs_primarykeys_' in key:
value[0] = set(value[0])
return value
return default
def set(self, key, value, timeout=99999999):
"""
Sets the value for a key to a given value
"""
if 'ovs_primarykeys_' in key:
value[0] = list(value[0])
data = self._read()
data['s'][key] = value
data['t'][key] = time.time() + timeout
self._save(data)
def add(self, key, value, timeout=99999999):
"""
        Adds a given key to the store, expecting the key does not exist yet
"""
data = self._read()
if key not in data['s']:
self.set(key, value, timeout)
return True
else:
return False
def delete(self, key):
"""
Deletes a given key from the store
"""
data = self._read()
if key in data['s']:
del data['s'][key]
del data['t'][key]
self._save(data)
def incr(self, key, delta=1):
"""
Increments the value of the key, expecting it exists
"""
data = self._read()
if key in data['s']:
data['s'][key] += delta
self._save(data)
return True
return False
def _save(self, data):
"""
Saves the local json file
"""
rawdata = json.dumps(data, sort_keys=True, indent=2)
f = open(self._path, 'w+')
f.write(rawdata)
f.close()
| apache-2.0 | 2,469,989,809,427,970,600 | 25.637931 | 76 | 0.530744 | false |
dchaplinsky/declarations.com.ua | declarations_site/spotter/management/commands/runsearchtasks.py | 1 | 2812 | import logging
from time import time
from threading import Lock, get_ident
from concurrent.futures import ThreadPoolExecutor
from django.core.management.base import BaseCommand
from spotter.models import SearchTask
from spotter.utils import do_search, create_report, create_notify
logger = logging.getLogger(__name__)
class DummyLock(object):
"""Dummy lock object for single thread mode"""
def acquire(self):
pass
def release(self):
pass
class Command(BaseCommand):
help = 'Run registered search tasks and send notifications'
def add_arguments(self, parser):
parser.add_argument(
'--concurrency', action='store',
dest='concurrency', type=int, default=0,
help='Run concurrently in N threads',
)
def handle(self, *args, **options):
if 'verbosity' in options:
verbosity = int(options['verbosity'])
levels = (logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)
logging.basicConfig(format='%(message)s', level=levels[verbosity])
concurrency = int(options.get('concurrency', 0))
self.lock = Lock() if concurrency else DummyLock()
self.counters = {'success': 0, 'failed': 0, 'emails': 0}
start_time = time()
query = SearchTask.objects.filter(is_enabled=True, is_deleted=False, user__is_active=True)
if concurrency:
with ThreadPoolExecutor(max_workers=concurrency) as executor:
executor.map(self.run_search_task, query)
else:
for task in query:
self.run_search_task(task)
self.counters['worktime'] = int(time() - start_time)
logger.info("Total {success} success {failed} failed {emails} emails sent in {worktime} sec."
.format(**self.counters))
def inc_counter(self, counter_name):
self.lock.acquire()
try:
self.counters[counter_name] += 1
finally:
self.lock.release()
def run_search_task(self, task):
try:
thread_id = get_ident()
logger.info("Thread-%d Process %s %s", thread_id, task.user, task.query)
search = do_search(task)
report = create_report(task, search)
logger.info("Thread-%d Found %d new %d", thread_id, report.found_total, report.found_new)
if report.found_new:
notify = create_notify(task, report)
logger.info("Thread-%d Send notify %s %s", thread_id, notify.email, notify.error)
self.inc_counter('emails')
self.inc_counter('success')
except Exception as e:
logger.exception("Thread-%d run_search_task(%d) %s", thread_id, task.id, str(e))
self.inc_counter('failed')
| mit | -3,874,970,097,778,877,400 | 34.594937 | 101 | 0.610242 | false |
nyalldawson/QGIS | tests/src/python/test_qgsprojectbadlayers.py | 15 | 15588 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsProject bad layers handling.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import chr
from builtins import range
__author__ = 'Alessandro Pasotti'
__date__ = '20/10/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import os
import filecmp
import qgis # NOQA
from qgis.core import (QgsProject,
QgsVectorLayer,
QgsCoordinateTransform,
QgsMapSettings,
QgsRasterLayer,
QgsMapLayer,
QgsRectangle,
QgsDataProvider,
QgsReadWriteContext,
QgsCoordinateReferenceSystem,
)
from qgis.gui import (QgsLayerTreeMapCanvasBridge,
QgsMapCanvas)
from qgis.PyQt.QtGui import QFont, QColor
from qgis.PyQt.QtTest import QSignalSpy
from qgis.PyQt.QtCore import QT_VERSION_STR, QTemporaryDir, QSize
from qgis.PyQt.QtXml import QDomDocument, QDomNode
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath, renderMapToImage)
from shutil import copyfile
app = start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsProjectBadLayers(unittest.TestCase):
def setUp(self):
p = QgsProject.instance()
p.removeAllMapLayers()
@classmethod
def getBaseMapSettings(cls):
"""
:rtype: QgsMapSettings
"""
ms = QgsMapSettings()
crs = QgsCoordinateReferenceSystem('epsg:4326')
ms.setBackgroundColor(QColor(152, 219, 249))
ms.setOutputSize(QSize(420, 280))
ms.setOutputDpi(72)
ms.setFlag(QgsMapSettings.Antialiasing, True)
ms.setFlag(QgsMapSettings.UseAdvancedEffects, False)
ms.setFlag(QgsMapSettings.ForceVectorOutput, False) # no caching?
ms.setDestinationCrs(crs)
return ms
def _change_data_source(self, layer, datasource, provider_key):
"""Due to the fact that a project r/w context is not available inside
the map layers classes, the original style and subset string restore
happens in app, this function replicates app behavior"""
options = QgsDataProvider.ProviderOptions()
subset_string = ''
if not layer.isValid():
try:
subset_string = layer.dataProvider().subsetString()
except:
pass
layer.setDataSource(datasource, layer.name(), provider_key, options)
if subset_string:
layer.setSubsetString(subset_string)
self.assertTrue(layer.originalXmlProperties(), layer.name())
context = QgsReadWriteContext()
context.setPathResolver(QgsProject.instance().pathResolver())
errorMsg = ''
doc = QDomDocument()
self.assertTrue(doc.setContent(layer.originalXmlProperties()))
layer_node = QDomNode(doc.firstChild())
self.assertTrue(layer.readSymbology(layer_node, errorMsg, context))
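    # Illustrative note: the helper above mirrors the repair step the QGIS app
    # performs for bad layers; the tests below call it along the lines of
    #
    #     self._change_data_source(layer, os.path.join(path, 'lines.shp'), 'ogr')
    #
    # where the path argument is whatever fixed datasource should replace the
    # broken one.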
def test_project_roundtrip(self):
"""Tests that a project with bad layers can be saved and restored"""
p = QgsProject.instance()
temp_dir = QTemporaryDir()
for ext in ('shp', 'dbf', 'shx', 'prj'):
copyfile(os.path.join(TEST_DATA_DIR, 'lines.%s' % ext), os.path.join(temp_dir.path(), 'lines.%s' % ext))
copyfile(os.path.join(TEST_DATA_DIR, 'raster', 'band1_byte_ct_epsg4326.tif'), os.path.join(temp_dir.path(), 'band1_byte_ct_epsg4326.tif'))
copyfile(os.path.join(TEST_DATA_DIR, 'raster', 'band1_byte_ct_epsg4326.tif'), os.path.join(temp_dir.path(), 'band1_byte_ct_epsg4326_copy.tif'))
l = QgsVectorLayer(os.path.join(temp_dir.path(), 'lines.shp'), 'lines', 'ogr')
self.assertTrue(l.isValid())
rl = QgsRasterLayer(os.path.join(temp_dir.path(), 'band1_byte_ct_epsg4326.tif'), 'raster', 'gdal')
self.assertTrue(rl.isValid())
rl_copy = QgsRasterLayer(os.path.join(temp_dir.path(), 'band1_byte_ct_epsg4326_copy.tif'), 'raster_copy', 'gdal')
self.assertTrue(rl_copy.isValid())
self.assertTrue(p.addMapLayers([l, rl, rl_copy]))
# Save project
project_path = os.path.join(temp_dir.path(), 'project.qgs')
self.assertTrue(p.write(project_path))
# Re-load the project, checking for the XML properties
p.removeAllMapLayers()
self.assertTrue(p.read(project_path))
vector = list(p.mapLayersByName('lines'))[0]
raster = list(p.mapLayersByName('raster'))[0]
raster_copy = list(p.mapLayersByName('raster_copy'))[0]
self.assertTrue(vector.originalXmlProperties() != '')
self.assertTrue(raster.originalXmlProperties() != '')
self.assertTrue(raster_copy.originalXmlProperties() != '')
# Test setter
raster.setOriginalXmlProperties('pippo')
self.assertEqual(raster.originalXmlProperties(), 'pippo')
# Now create and invalid project:
bad_project_path = os.path.join(temp_dir.path(), 'project_bad.qgs')
with open(project_path, 'r') as infile:
with open(bad_project_path, 'w+') as outfile:
outfile.write(infile.read().replace('./lines.shp', './lines-BAD_SOURCE.shp').replace('band1_byte_ct_epsg4326_copy.tif', 'band1_byte_ct_epsg4326_copy-BAD_SOURCE.tif'))
# Load the bad project
p.removeAllMapLayers()
self.assertTrue(p.read(bad_project_path))
# Check layer is invalid
vector = list(p.mapLayersByName('lines'))[0]
raster = list(p.mapLayersByName('raster'))[0]
raster_copy = list(p.mapLayersByName('raster_copy'))[0]
self.assertIsNotNone(vector.dataProvider())
self.assertIsNotNone(raster.dataProvider())
self.assertIsNotNone(raster_copy.dataProvider())
self.assertFalse(vector.isValid())
self.assertFalse(raster_copy.isValid())
# Try a getFeatures
self.assertEqual([f for f in vector.getFeatures()], [])
self.assertTrue(raster.isValid())
self.assertEqual(vector.providerType(), 'ogr')
# Save the project
bad_project_path2 = os.path.join(temp_dir.path(), 'project_bad2.qgs')
p.write(bad_project_path2)
# Re-save the project, with fixed paths
good_project_path = os.path.join(temp_dir.path(), 'project_good.qgs')
with open(bad_project_path2, 'r') as infile:
with open(good_project_path, 'w+') as outfile:
outfile.write(infile.read().replace('./lines-BAD_SOURCE.shp', './lines.shp').replace('band1_byte_ct_epsg4326_copy-BAD_SOURCE.tif', 'band1_byte_ct_epsg4326_copy.tif'))
# Load the good project
p.removeAllMapLayers()
self.assertTrue(p.read(good_project_path))
# Check layer is valid
vector = list(p.mapLayersByName('lines'))[0]
raster = list(p.mapLayersByName('raster'))[0]
raster_copy = list(p.mapLayersByName('raster_copy'))[0]
self.assertTrue(vector.isValid())
self.assertTrue(raster.isValid())
self.assertTrue(raster_copy.isValid())
def test_project_relations(self):
"""Tests that a project with bad layers and relations can be saved with relations"""
temp_dir = QTemporaryDir()
p = QgsProject.instance()
for ext in ('qgs', 'gpkg'):
copyfile(os.path.join(TEST_DATA_DIR, 'projects', 'relation_reference_test.%s' % ext), os.path.join(temp_dir.path(), 'relation_reference_test.%s' % ext))
# Load the good project
project_path = os.path.join(temp_dir.path(), 'relation_reference_test.qgs')
p.removeAllMapLayers()
self.assertTrue(p.read(project_path))
point_a = list(p.mapLayersByName('point_a'))[0]
point_b = list(p.mapLayersByName('point_b'))[0]
point_a_source = point_a.publicSource()
point_b_source = point_b.publicSource()
self.assertTrue(point_a.isValid())
self.assertTrue(point_b.isValid())
# Check relations
def _check_relations():
relation = list(p.relationManager().relations().values())[0]
self.assertTrue(relation.isValid())
self.assertEqual(relation.referencedLayer().id(), point_b.id())
self.assertEqual(relation.referencingLayer().id(), point_a.id())
_check_relations()
# Now build a bad project
bad_project_path = os.path.join(temp_dir.path(), 'relation_reference_test_bad.qgs')
with open(project_path, 'r') as infile:
with open(bad_project_path, 'w+') as outfile:
outfile.write(infile.read().replace('./relation_reference_test.gpkg', './relation_reference_test-BAD_SOURCE.gpkg'))
# Load the bad project
p.removeAllMapLayers()
self.assertTrue(p.read(bad_project_path))
point_a = list(p.mapLayersByName('point_a'))[0]
point_b = list(p.mapLayersByName('point_b'))[0]
self.assertFalse(point_a.isValid())
self.assertFalse(point_b.isValid())
# This fails because relations are not valid anymore
with self.assertRaises(AssertionError):
_check_relations()
# Changing data source, relations should be restored:
point_a.setDataSource(point_a_source, 'point_a', 'ogr')
point_b.setDataSource(point_b_source, 'point_b', 'ogr')
self.assertTrue(point_a.isValid())
self.assertTrue(point_b.isValid())
# Check if relations were restored
_check_relations()
# Reload the bad project
p.removeAllMapLayers()
self.assertTrue(p.read(bad_project_path))
point_a = list(p.mapLayersByName('point_a'))[0]
point_b = list(p.mapLayersByName('point_b'))[0]
self.assertFalse(point_a.isValid())
self.assertFalse(point_b.isValid())
# This fails because relations are not valid anymore
with self.assertRaises(AssertionError):
_check_relations()
# Save the bad project
bad_project_path2 = os.path.join(temp_dir.path(), 'relation_reference_test_bad2.qgs')
p.write(bad_project_path2)
# Now fix the bad project
bad_project_path_fixed = os.path.join(temp_dir.path(), 'relation_reference_test_bad_fixed.qgs')
with open(bad_project_path2, 'r') as infile:
with open(bad_project_path_fixed, 'w+') as outfile:
outfile.write(infile.read().replace('./relation_reference_test-BAD_SOURCE.gpkg', './relation_reference_test.gpkg'))
# Load the fixed project
p.removeAllMapLayers()
self.assertTrue(p.read(bad_project_path_fixed))
point_a = list(p.mapLayersByName('point_a'))[0]
point_b = list(p.mapLayersByName('point_b'))[0]
point_a_source = point_a.publicSource()
point_b_source = point_b.publicSource()
self.assertTrue(point_a.isValid())
self.assertTrue(point_b.isValid())
_check_relations()
def testStyles(self):
"""Test that styles for rasters and vectors are kept when setDataSource is called"""
temp_dir = QTemporaryDir()
p = QgsProject.instance()
for f in (
'bad_layer_raster_test.tfw',
'bad_layer_raster_test.tiff',
'bad_layer_raster_test.tiff.aux.xml',
'bad_layers_test.gpkg',
'good_layers_test.qgs'):
copyfile(os.path.join(TEST_DATA_DIR, 'projects', f), os.path.join(temp_dir.path(), f))
project_path = os.path.join(temp_dir.path(), 'good_layers_test.qgs')
        p = QgsProject.instance()
p.removeAllMapLayers()
self.assertTrue(p.read(project_path))
self.assertEqual(p.count(), 4)
ms = self.getBaseMapSettings()
point_a_copy = list(p.mapLayersByName('point_a copy'))[0]
point_a = list(p.mapLayersByName('point_a'))[0]
point_b = list(p.mapLayersByName('point_b'))[0]
raster = list(p.mapLayersByName('bad_layer_raster_test'))[0]
self.assertTrue(point_a_copy.isValid())
self.assertTrue(point_a.isValid())
self.assertTrue(point_b.isValid())
self.assertTrue(raster.isValid())
ms.setExtent(QgsRectangle(2.81861, 41.98138, 2.81952, 41.9816))
ms.setLayers([point_a_copy, point_a, point_b, raster])
image = renderMapToImage(ms)
self.assertTrue(image.save(os.path.join(temp_dir.path(), 'expected.png'), 'PNG'))
point_a_source = point_a.publicSource()
point_b_source = point_b.publicSource()
raster_source = raster.publicSource()
self._change_data_source(point_a, point_a_source, 'ogr')
# Attention: we are not passing the subset string here:
self._change_data_source(point_a_copy, point_a_source, 'ogr')
self._change_data_source(point_b, point_b_source, 'ogr')
        self._change_data_source(raster, raster_source, 'gdal')
        # Re-render with the restored data sources before comparing against the reference image
        image = renderMapToImage(ms)
        self.assertTrue(image.save(os.path.join(temp_dir.path(), 'actual.png'), 'PNG'))
        self.assertTrue(filecmp.cmp(os.path.join(temp_dir.path(), 'actual.png'), os.path.join(temp_dir.path(), 'expected.png'), shallow=False))
# Now build a bad project
p.removeAllMapLayers()
bad_project_path = os.path.join(temp_dir.path(), 'bad_layers_test.qgs')
with open(project_path, 'r') as infile:
with open(bad_project_path, 'w+') as outfile:
outfile.write(infile.read().replace('./bad_layers_test.', './bad_layers_test-BAD_SOURCE.').replace('bad_layer_raster_test.tiff', 'bad_layer_raster_test-BAD_SOURCE.tiff'))
p.removeAllMapLayers()
self.assertTrue(p.read(bad_project_path))
self.assertEqual(p.count(), 4)
point_a_copy = list(p.mapLayersByName('point_a copy'))[0]
point_a = list(p.mapLayersByName('point_a'))[0]
point_b = list(p.mapLayersByName('point_b'))[0]
raster = list(p.mapLayersByName('bad_layer_raster_test'))[0]
self.assertFalse(point_a.isValid())
self.assertFalse(point_a_copy.isValid())
self.assertFalse(point_b.isValid())
self.assertFalse(raster.isValid())
ms.setLayers([point_a_copy, point_a, point_b, raster])
image = renderMapToImage(ms)
self.assertTrue(image.save(os.path.join(temp_dir.path(), 'bad.png'), 'PNG'))
        self.assertFalse(filecmp.cmp(os.path.join(temp_dir.path(), 'bad.png'), os.path.join(temp_dir.path(), 'expected.png'), shallow=False))
self._change_data_source(point_a, point_a_source, 'ogr')
        # Attention: again, we are not passing the subset string here:
self._change_data_source(point_a_copy, point_a_source, 'ogr')
self._change_data_source(point_b, point_b_source, 'ogr')
self._change_data_source(raster, raster_source, 'gdal')
self.assertTrue(point_a.isValid())
self.assertTrue(point_a_copy.isValid())
self.assertTrue(point_b.isValid())
self.assertTrue(raster.isValid())
ms.setLayers([point_a_copy, point_a, point_b, raster])
image = renderMapToImage(ms)
self.assertTrue(image.save(os.path.join(temp_dir.path(), 'actual_fixed.png'), 'PNG'))
        self.assertTrue(filecmp.cmp(os.path.join(temp_dir.path(), 'actual_fixed.png'), os.path.join(temp_dir.path(), 'expected.png'), shallow=False))
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 3,818,613,502,525,002,000 | 43.664756 | 186 | 0.629779 | false |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/site-packages/dask/array/tests/test_array_core.py | 2 | 110011 | from __future__ import absolute_import, division, print_function
import copy
import pytest
np = pytest.importorskip('numpy')
import os
import sys
import time
from distutils.version import LooseVersion
import operator
from operator import add, sub, getitem
from threading import Lock
import warnings
from toolz import merge, countby, concat
from toolz.curried import identity
import dask
import dask.array as da
from dask.base import tokenize, compute_as_if_collection
from dask.delayed import Delayed, delayed
from dask.utils import ignoring, tmpfile, tmpdir, key_split
from dask.utils_test import inc, dec
from dask.array import chunk
from dask.array.core import (getem, getter, top, dotmany, concatenate3,
broadcast_dimensions, Array, stack, concatenate,
from_array, elemwise, broadcast_shapes,
broadcast_to, blockdims_from_blockshape, store,
optimize, from_func, normalize_chunks,
broadcast_chunks, atop, from_delayed,
concatenate_axes, common_blockdim)
from dask.array.utils import assert_eq, same_keys
# temporary until numpy functions migrated
try:
from numpy import nancumsum, nancumprod
except ImportError: # pragma: no cover
import dask.array.numpy_compat as npcompat
nancumsum = npcompat.nancumsum
nancumprod = npcompat.nancumprod
def test_getem():
sol = {('X', 0, 0): (getter, 'X', (slice(0, 2), slice(0, 3))),
('X', 1, 0): (getter, 'X', (slice(2, 4), slice(0, 3))),
('X', 1, 1): (getter, 'X', (slice(2, 4), slice(3, 6))),
('X', 0, 1): (getter, 'X', (slice(0, 2), slice(3, 6)))}
assert getem('X', (2, 3), shape=(4, 6)) == sol
def test_top():
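    # top() builds the task graph of a blocked operation from index strings and block counts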
assert top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) == \
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
assert top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk',
numblocks={'x': (2, 2), 'y': (2, 2)}) == \
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
assert top(identity, 'z', '', 'x', 'ij', numblocks={'x': (2, 2)}) ==\
{('z',): (identity, [[('x', 0, 0), ('x', 0, 1)],
[('x', 1, 0), ('x', 1, 1)]])}
def test_top_supports_broadcasting_rules():
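    # A dimension with a single block is broadcast against every block of the other operand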
assert top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij',
numblocks={'x': (1, 2), 'y': (2, 1)}) == \
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 0)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 0))}
def test_top_literals():
assert top(add, 'z', 'ij', 'x', 'ij', 123, None, numblocks={'x': (2, 2)}) == \
{('z', 0, 0): (add, ('x', 0, 0), 123),
('z', 0, 1): (add, ('x', 0, 1), 123),
('z', 1, 0): (add, ('x', 1, 0), 123),
('z', 1, 1): (add, ('x', 1, 1), 123)}
def test_atop_literals():
x = da.ones((10, 10), chunks=(5, 5))
z = atop(add, 'ij', x, 'ij', 100, None, dtype=x.dtype)
assert_eq(z, x + 100)
z = atop(lambda x, y, z: x * y + z, 'ij', 2, None, x, 'ij', 100, None, dtype=x.dtype)
assert_eq(z, 2 * x + 100)
z = atop(getitem, 'ij', x, 'ij', slice(None), None, dtype=x.dtype)
assert_eq(z, x)
def test_concatenate3_on_scalars():
assert_eq(concatenate3([1, 2]), np.array([1, 2]))
def test_chunked_dot_product():
x = np.arange(400).reshape((20, 20))
o = np.ones((20, 20))
d = {'x': x, 'o': o}
getx = getem('x', (5, 5), shape=(20, 20))
geto = getem('o', (5, 5), shape=(20, 20))
result = top(dotmany, 'out', 'ik', 'x', 'ij', 'o', 'jk',
numblocks={'x': (4, 4), 'o': (4, 4)})
dsk = merge(d, getx, geto, result)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert_eq(np.dot(x, o), concatenate3(out))
def test_chunked_transpose_plus_one():
x = np.arange(400).reshape((20, 20))
d = {'x': x}
getx = getem('x', (5, 5), shape=(20, 20))
f = lambda x: x.T + 1
comp = top(f, 'out', 'ij', 'x', 'ji', numblocks={'x': (4, 4)})
dsk = merge(d, getx, comp)
out = dask.get(dsk, [[('out', i, j) for j in range(4)] for i in range(4)])
assert_eq(concatenate3(out), x.T + 1)
def test_broadcast_dimensions_works_with_singleton_dimensions():
argpairs = [('x', 'i')]
numblocks = {'x': ((1,),)}
assert broadcast_dimensions(argpairs, numblocks) == {'i': (1,)}
def test_broadcast_dimensions():
argpairs = [('x', 'ij'), ('y', 'ij')]
d = {'x': ('Hello', 1), 'y': (1, (2, 3))}
assert broadcast_dimensions(argpairs, d) == {'i': 'Hello', 'j': (2, 3)}
def test_Array():
shape = (1000, 1000)
chunks = (100, 100)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, chunks, shape=shape))
a = Array(dsk, name, chunks, shape=shape, dtype='f8')
assert a.numblocks == (10, 10)
assert a.__dask_keys__() == [[('x', i, j) for j in range(10)]
for i in range(10)]
assert a.chunks == ((100,) * 10, (100,) * 10)
assert a.shape == shape
assert len(a) == shape[0]
def test_uneven_chunks():
a = Array({}, 'x', chunks=(3, 3), shape=(10, 10), dtype='f8')
assert a.chunks == ((3, 3, 3, 1), (3, 3, 3, 1))
def test_numblocks_supports_singleton_block_dims():
shape = (100, 10)
chunks = (10, 10)
name = 'x'
dsk = merge({name: 'some-array'}, getem(name, shape=shape, chunks=chunks))
a = Array(dsk, name, chunks, shape=shape, dtype='f8')
assert set(concat(a.__dask_keys__())) == {('x', i, 0) for i in range(10)}
def test_keys():
dsk = dict((('x', i, j), ()) for i in range(5) for j in range(6))
dx = Array(dsk, 'x', chunks=(10, 10), shape=(50, 60), dtype='f8')
assert dx.__dask_keys__() == [[(dx.name, i, j) for j in range(6)]
for i in range(5)]
# Cache works
assert dx.__dask_keys__() is dx.__dask_keys__()
# Test mutating names clears key cache
dx.dask = {('y', i, j): () for i in range(5) for j in range(6)}
dx.name = 'y'
assert dx.__dask_keys__() == [[(dx.name, i, j) for j in range(6)]
for i in range(5)]
d = Array({}, 'x', (), shape=(), dtype='f8')
assert d.__dask_keys__() == [('x',)]
def test_Array_computation():
a = Array({('x', 0, 0): np.eye(3)}, 'x', shape=(3, 3), chunks=(3, 3), dtype='f8')
assert_eq(np.array(a), np.eye(3))
assert isinstance(a.compute(), np.ndarray)
assert float(a[0, 0]) == 1
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.14.0',
reason="NumPy doesn't have `np.linalg._umath_linalg` yet")
@pytest.mark.xfail(reason="Protect from `np.linalg._umath_linalg.inv` breaking")
def test_Array_numpy_gufunc_call__array_ufunc__01():
x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))
nx = x.compute()
ny = np.linalg._umath_linalg.inv(nx)
y = np.linalg._umath_linalg.inv(x, output_dtypes=float)
vy = y.compute()
assert_eq(ny, vy)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.14.0',
reason="NumPy doesn't have `np.linalg._umath_linalg` yet")
@pytest.mark.xfail(reason="Protect from `np.linalg._umath_linalg.eig` breaking")
def test_Array_numpy_gufunc_call__array_ufunc__02():
x = da.random.normal(size=(3, 10, 10), chunks=(2, 10, 10))
nx = x.compute()
nw, nv = np.linalg._umath_linalg.eig(nx)
w, v = np.linalg._umath_linalg.eig(x, output_dtypes=(float, float))
vw = w.compute()
vv = v.compute()
assert_eq(nw, vw)
assert_eq(nv, vv)
def test_stack():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, chunks=(2, 3), dtype='f8', shape=(4, 6))
for name in 'ABC']
s = stack([a, b, c], axis=0)
colon = slice(None, None, None)
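    # Stacking along axis 0 adds a new leading dimension with one block per stacked array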
assert s.shape == (3, 4, 6)
assert s.chunks == ((1, 1, 1), (2, 2), (3, 3))
assert s.chunksize == (1, 2, 3)
assert s.dask[(s.name, 0, 1, 0)] == (getitem, ('A', 1, 0),
(None, colon, colon))
assert s.dask[(s.name, 2, 1, 0)] == (getitem, ('C', 1, 0),
(None, colon, colon))
assert same_keys(s, stack([a, b, c], axis=0))
s2 = stack([a, b, c], axis=1)
assert s2.shape == (4, 3, 6)
assert s2.chunks == ((2, 2), (1, 1, 1), (3, 3))
assert s2.chunksize == (2, 1, 3)
assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, ('B', 0, 0),
(colon, None, colon))
assert s2.dask[(s2.name, 1, 1, 0)] == (getitem, ('B', 1, 0),
(colon, None, colon))
assert same_keys(s2, stack([a, b, c], axis=1))
s2 = stack([a, b, c], axis=2)
assert s2.shape == (4, 6, 3)
assert s2.chunks == ((2, 2), (3, 3), (1, 1, 1))
assert s2.chunksize == (2, 3, 1)
assert s2.dask[(s2.name, 0, 1, 0)] == (getitem, ('A', 0, 1),
(colon, colon, None))
assert s2.dask[(s2.name, 1, 1, 2)] == (getitem, ('C', 1, 1),
(colon, colon, None))
assert same_keys(s2, stack([a, b, c], axis=2))
pytest.raises(ValueError, lambda: stack([a, b, c], axis=3))
assert set(b.dask.keys()).issubset(s2.dask.keys())
assert stack([a, b, c], axis=-1).chunks == stack([a, b, c], axis=2).chunks
def test_short_stack():
x = np.array([1])
d = da.from_array(x, chunks=(1,))
s = da.stack([d])
assert s.shape == (1, 1)
chunks = compute_as_if_collection(Array, s.dask, s.__dask_keys__())
assert chunks[0][0].shape == (1, 1)
def test_stack_scalars():
d = da.arange(4, chunks=2)
s = da.stack([d.mean(), d.sum()])
assert s.compute().tolist() == [np.arange(4).mean(), np.arange(4).sum()]
def test_stack_promote_type():
i = np.arange(10, dtype='i4')
f = np.arange(10, dtype='f4')
di = da.from_array(i, chunks=5)
df = da.from_array(f, chunks=5)
res = da.stack([di, df])
assert_eq(res, np.stack([i, f]))
def test_stack_rechunk():
x = da.random.random(10, chunks=5)
y = da.random.random(10, chunks=4)
z = da.stack([x, y], axis=0)
assert z.shape == (2, 10)
assert z.chunks == ((1, 1), (4, 1, 3, 2))
assert_eq(z, np.stack([x.compute(), y.compute()], axis=0))
def test_concatenate():
a, b, c = [Array(getem(name, chunks=(2, 3), shape=(4, 6)),
name, chunks=(2, 3), dtype='f8', shape=(4, 6))
for name in 'ABC']
x = concatenate([a, b, c], axis=0)
assert x.shape == (12, 6)
assert x.chunks == ((2, 2, 2, 2, 2, 2), (3, 3))
assert x.dask[(x.name, 0, 1)] == ('A', 0, 1)
assert x.dask[(x.name, 5, 0)] == ('C', 1, 0)
assert same_keys(x, concatenate([a, b, c], axis=0))
y = concatenate([a, b, c], axis=1)
assert y.shape == (4, 18)
assert y.chunks == ((2, 2), (3, 3, 3, 3, 3, 3))
assert y.dask[(y.name, 1, 0)] == ('A', 1, 0)
assert y.dask[(y.name, 1, 5)] == ('C', 1, 1)
assert same_keys(y, concatenate([a, b, c], axis=1))
assert set(b.dask.keys()).issubset(y.dask.keys())
z = concatenate([a], axis=0)
assert z.shape == a.shape
assert z.chunks == a.chunks
assert z.dask == a.dask
assert z is a
assert (concatenate([a, b, c], axis=-1).chunks ==
concatenate([a, b, c], axis=1).chunks)
pytest.raises(ValueError, lambda: concatenate([a, b, c], axis=2))
@pytest.mark.parametrize('dtypes', [(('>f8', '>f8'), '>f8'),
(('<f4', '<f8'), '<f8')])
def test_concatenate_types(dtypes):
dts_in, dt_out = dtypes
arrs = [np.zeros(4, dtype=dt) for dt in dts_in]
darrs = [from_array(arr, chunks=(2,)) for arr in arrs]
x = concatenate(darrs, axis=0)
assert x.dtype == dt_out
def test_concatenate_unknown_axes():
dd = pytest.importorskip('dask.dataframe')
pd = pytest.importorskip('pandas')
a_df = pd.DataFrame({'x': np.arange(12)})
b_df = pd.DataFrame({'y': np.arange(12) * 10})
a_ddf = dd.from_pandas(a_df, sort=False, npartitions=3)
b_ddf = dd.from_pandas(b_df, sort=False, npartitions=3)
a_x = a_ddf.values
b_x = b_ddf.values
assert np.isnan(a_x.shape[0])
assert np.isnan(b_x.shape[0])
da.concatenate([a_x, b_x], axis=0) # works fine
with pytest.raises(ValueError) as exc_info:
da.concatenate([a_x, b_x], axis=1) # unknown chunks
assert 'nan' in str(exc_info.value)
assert 'allow_unknown_chunksize' in str(exc_info.value)
c_x = da.concatenate([a_x, b_x], axis=1, allow_unknown_chunksizes=True) # unknown chunks
assert_eq(c_x, np.concatenate([a_df.values, b_df.values], axis=1))
def test_concatenate_rechunk():
x = da.random.random((6, 6), chunks=(3, 3))
y = da.random.random((6, 6), chunks=(2, 2))
z = da.concatenate([x, y], axis=0)
assert z.shape == (12, 6)
assert z.chunks == ((3, 3, 2, 2, 2), (2, 1, 1, 2))
assert_eq(z, np.concatenate([x.compute(), y.compute()], axis=0))
z = da.concatenate([x, y], axis=1)
assert z.shape == (6, 12)
assert z.chunks == ((2, 1, 1, 2), (3, 3, 2, 2, 2))
assert_eq(z, np.concatenate([x.compute(), y.compute()], axis=1))
def test_concatenate_fixlen_strings():
x = np.array(['a', 'b', 'c'])
y = np.array(['aa', 'bb', 'cc'])
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
assert_eq(np.concatenate([x, y]),
da.concatenate([a, b]))
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_simple_row_wise():
a1 = np.ones((2, 2))
a2 = 2 * a1
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([a1, a2])
result = da.block([d1, d2])
assert_eq(expected, result)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_simple_column_wise():
a1 = np.ones((2, 2))
a2 = 2 * a1
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[a1], [a2]])
result = da.block([[d1], [d2]])
assert_eq(expected, result)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_with_1d_arrays_row_wise():
    # 1-D vectors are treated as row arrays
a1 = np.array([1, 2, 3])
a2 = np.array([2, 3, 4])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([a1, a2])
result = da.block([d1, d2])
assert_eq(expected, result)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_with_1d_arrays_multiple_rows():
a1 = np.array([1, 2, 3])
a2 = np.array([2, 3, 4])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[a1, a2], [a1, a2]])
result = da.block([[d1, d2], [d1, d2]])
assert_eq(expected, result)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_with_1d_arrays_column_wise():
    # 1-D vectors are treated as row arrays
a1 = np.array([1, 2, 3])
a2 = np.array([2, 3, 4])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
expected = np.block([[a1], [a2]])
result = da.block([[d1], [d2]])
assert_eq(expected, result)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_mixed_1d_and_2d():
a1 = np.ones((2, 2))
a2 = np.array([2, 2])
d1 = da.asarray(a1)
d2 = da.asarray(a2)
    expected = np.block([[a1], [a2]])
    result = da.block([[d1], [d2]])
assert_eq(expected, result)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_complicated():
# a bit more complicated
a1 = np.array([[1, 1, 1]])
a2 = np.array([[2, 2, 2]])
a3 = np.array([[3, 3, 3, 3, 3, 3]])
a4 = np.array([4, 4, 4, 4, 4, 4])
a5 = np.array(5)
a6 = np.array([6, 6, 6, 6, 6])
a7 = np.zeros((2, 6))
d1 = da.asarray(a1)
d2 = da.asarray(a2)
d3 = da.asarray(a3)
d4 = da.asarray(a4)
d5 = da.asarray(a5)
d6 = da.asarray(a6)
d7 = da.asarray(a7)
expected = np.block([[a1, a2],
[a3],
[a4],
[a5, a6],
[a7]])
result = da.block([[d1, d2],
[d3],
[d4],
[d5, d6],
[d7]])
assert_eq(expected, result)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_nested():
a1 = np.array([1, 1, 1])
a2 = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
a3 = np.array([3, 3, 3])
a4 = np.array([4, 4, 4])
a5 = np.array(5)
a6 = np.array([6, 6, 6, 6, 6])
a7 = np.zeros((2, 6))
d1 = da.asarray(a1)
d2 = da.asarray(a2)
d3 = da.asarray(a3)
d4 = da.asarray(a4)
d5 = da.asarray(a5)
d6 = da.asarray(a6)
d7 = da.asarray(a7)
expected = np.block([
[
np.block([
[a1],
[a3],
[a4]
]),
a2
],
[a5, a6],
[a7]
])
result = da.block([
[
da.block([
[d1],
[d3],
[d4]
]),
d2
],
[d5, d6],
[d7]
])
assert_eq(expected, result)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_3d():
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
d000 = da.asarray(a000)
d100 = da.asarray(a100)
d010 = da.asarray(a010)
d001 = da.asarray(a001)
d011 = da.asarray(a011)
d101 = da.asarray(a101)
d110 = da.asarray(a110)
d111 = da.asarray(a111)
expected = np.block([
[
[a000, a001],
[a010, a011],
],
[
[a100, a101],
[a110, a111],
]
])
result = da.block([
[
[d000, d001],
[d010, d011],
],
[
[d100, d101],
[d110, d111],
]
])
assert_eq(expected, result)
def test_block_with_mismatched_shape():
a = np.array([0, 0])
b = np.eye(2)
for arrays in [[a, b],
[b, a]]:
with pytest.raises(ValueError):
da.block(arrays)
@pytest.mark.skipif(LooseVersion(np.__version__) < '1.13.0',
reason="NumPy doesn't support `block` yet")
def test_block_no_lists():
assert_eq(da.block(1), np.block(1))
assert_eq(da.block(np.eye(3)), np.block(np.eye(3)))
def test_block_invalid_nesting():
for arrays in [
[1, [2]],
[1, []],
[[1], 2],
[[], 2],
[
[[1], [2]],
[[3, 4]],
[5] # missing brackets
],
]:
with pytest.raises(ValueError) as e:
da.block(arrays)
e.match(r'depths are mismatched')
def test_block_empty_lists():
for arrays in [
[],
[[]],
[[1], []],
]:
with pytest.raises(ValueError) as e:
da.block(arrays)
e.match(r'empty')
def test_block_tuple():
for arrays in [
([1, 2], [3, 4]),
[(1, 2), (3, 4)],
]:
with pytest.raises(TypeError) as e:
da.block(arrays)
e.match(r'tuple')
def test_binops():
a = Array(dict((('a', i), np.array([0])) for i in range(3)),
'a', chunks=((1, 1, 1),), dtype='i8')
b = Array(dict((('b', i), np.array([0])) for i in range(3)),
'b', chunks=((1, 1, 1),), dtype='i8')
result = elemwise(add, a, b, name='c')
assert result.dask == merge(a.dask, b.dask,
dict((('c', i), (add, ('a', i), ('b', i)))
for i in range(3)))
result = elemwise(pow, a, 2, name='c')
assert "'a', 0" in str(result.dask[('c', 0)])
assert "2" in str(result.dask[('c', 0)])
def test_broadcast_shapes():
assert () == broadcast_shapes()
assert (2, 5) == broadcast_shapes((2, 5))
assert (0, 5) == broadcast_shapes((0, 1), (1, 5))
assert np.allclose(
(2, np.nan), broadcast_shapes((1, np.nan), (2, 1)), equal_nan=True
)
assert np.allclose(
(2, np.nan), broadcast_shapes((2, 1), (1, np.nan)), equal_nan=True
)
assert (3, 4, 5) == broadcast_shapes((3, 4, 5), (4, 1), ())
assert (3, 4) == broadcast_shapes((3, 1), (1, 4), (4,))
assert (5, 6, 7, 3, 4) == broadcast_shapes((3, 1), (), (5, 6, 7, 1, 4))
pytest.raises(ValueError, lambda: broadcast_shapes((3,), (3, 4)))
pytest.raises(ValueError, lambda: broadcast_shapes((2, 3), (2, 3, 1)))
pytest.raises(ValueError, lambda: broadcast_shapes((2, 3), (1, np.nan)))
def test_elemwise_on_scalars():
x = np.arange(10, dtype=np.int64)
a = from_array(x, chunks=(5,))
assert len(a.__dask_keys__()) == 2
assert_eq(a.sum()**2, x.sum()**2)
y = np.arange(10, dtype=np.int32)
b = from_array(y, chunks=(5,))
result = a.sum() * b
# Dask 0-d arrays do not behave like numpy scalars for type promotion
assert result.dtype == np.int64
assert result.compute().dtype == np.int64
assert (x.sum() * y).dtype == np.int32
assert_eq((x.sum() * y).astype(np.int64), result)
def test_elemwise_with_ndarrays():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 3))
assert_eq(x + a, 2 * x)
assert_eq(a + x, 2 * x)
assert_eq(x + b, x + y)
assert_eq(b + x, x + y)
assert_eq(a + y, x + y)
assert_eq(y + a, x + y)
# Error on shape mismatch
pytest.raises(ValueError, lambda: a + y.T)
pytest.raises(ValueError, lambda: a + np.arange(2))
def test_elemwise_differently_chunked():
x = np.arange(3)
y = np.arange(12).reshape(4, 3)
a = from_array(x, chunks=(3,))
b = from_array(y, chunks=(2, 2))
assert_eq(a + b, x + y)
assert_eq(b + a, x + y)
def test_elemwise_dtype():
values = [
da.from_array(np.ones(5, np.float32), chunks=3),
da.from_array(np.ones(5, np.int16), chunks=3),
da.from_array(np.ones(5, np.int64), chunks=3),
da.from_array(np.ones((), np.float64), chunks=()) * 1e200,
np.ones(5, np.float32),
1, 1.0, 1e200, np.int64(1), np.ones((), np.int64),
]
for x in values:
for y in values:
assert da.maximum(x, y).dtype == da.result_type(x, y)
def test_operators():
x = np.arange(10)
y = np.arange(10).reshape((10, 1))
a = from_array(x, chunks=(5,))
b = from_array(y, chunks=(5, 1))
c = a + 1
assert_eq(c, x + 1)
c = a + b
assert_eq(c, x + x.reshape((10, 1)))
expr = (3 / a * b)**2 > 5
with pytest.warns(None): # ZeroDivisionWarning
assert_eq(expr, (3 / x * y)**2 > 5)
with pytest.warns(None): # OverflowWarning
c = da.exp(a)
assert_eq(c, np.exp(x))
assert_eq(abs(-a), a)
assert_eq(a, +x)
def test_operator_dtype_promotion():
x = np.arange(10, dtype=np.float32)
y = np.array([1])
a = from_array(x, chunks=(5,))
assert_eq(x + 1, a + 1) # still float32
assert_eq(x + 1e50, a + 1e50) # now float64
assert_eq(x + y, a + y) # also float64
def test_field_access():
x = np.array([(1, 1.0), (2, 2.0)], dtype=[('a', 'i4'), ('b', 'f4')])
y = from_array(x, chunks=(1,))
assert_eq(y['a'], x['a'])
assert_eq(y[['b', 'a']], x[['b', 'a']])
assert same_keys(y[['b', 'a']], y[['b', 'a']])
def test_field_access_with_shape():
dtype = [('col1', ('f4', (3, 2))), ('col2', ('f4', 3))]
data = np.ones((100, 50), dtype=dtype)
x = da.from_array(data, 10)
assert_eq(x['col1'], data['col1'])
assert_eq(x[['col1']], data[['col1']])
assert_eq(x['col2'], data['col2'])
assert_eq(x[['col1', 'col2']], data[['col1', 'col2']])
@pytest.mark.skipif(sys.version_info < (3, 5),
reason="Matrix multiplication operator only after Py3.5")
def test_matmul():
x = np.random.random((5, 5))
y = np.random.random((5, 2))
a = from_array(x, chunks=(1, 5))
b = from_array(y, chunks=(5, 1))
assert_eq(operator.matmul(a, b), a.dot(b))
assert_eq(operator.matmul(a, b), operator.matmul(x, y))
assert_eq(operator.matmul(a, y), operator.matmul(x, b))
list_vec = list(range(1, 6))
assert_eq(operator.matmul(list_vec, b), operator.matmul(list_vec, y))
assert_eq(operator.matmul(x, list_vec), operator.matmul(a, list_vec))
z = np.random.random((5, 5, 5))
c = from_array(z, chunks=(1, 5, 1))
assert_eq(operator.matmul(a, z), operator.matmul(x, c))
assert_eq(operator.matmul(z, a), operator.matmul(c, x))
def test_T():
x = np.arange(400).reshape((20, 20))
a = from_array(x, chunks=(5, 5))
assert_eq(x.T, a.T)
def test_norm():
a = np.arange(200, dtype='f8').reshape((20, 10))
a = a + (a.max() - a) * 1j
b = from_array(a, chunks=(5, 5))
# TODO: Deprecated method, remove test when method removed
with pytest.warns(UserWarning):
assert_eq(b.vnorm(), np.linalg.norm(a))
assert_eq(b.vnorm(ord=1), np.linalg.norm(a.flatten(), ord=1))
assert_eq(b.vnorm(ord=4, axis=0), np.linalg.norm(a, ord=4, axis=0))
assert b.vnorm(ord=4, axis=0, keepdims=True).ndim == b.ndim
split_every = {0: 3, 1: 3}
assert_eq(b.vnorm(ord=1, axis=0, split_every=split_every),
np.linalg.norm(a, ord=1, axis=0))
assert_eq(b.vnorm(ord=np.inf, axis=0, split_every=split_every),
np.linalg.norm(a, ord=np.inf, axis=0))
assert_eq(b.vnorm(ord=np.inf, split_every=split_every),
np.linalg.norm(a.flatten(), ord=np.inf))
def test_broadcast_to():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape in [a.shape, (5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
xb = chunk.broadcast_to(x, shape)
ab = broadcast_to(a, shape)
assert_eq(xb, ab)
if a.shape == ab.shape:
assert a is ab
pytest.raises(ValueError, lambda: broadcast_to(a, (2, 1, 6)))
pytest.raises(ValueError, lambda: broadcast_to(a, (3,)))
def test_broadcast_to_array():
x = np.random.randint(10, size=(5, 1, 6))
for shape in [(5, 0, 6), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
a = np.broadcast_to(x, shape)
d = broadcast_to(x, shape)
assert_eq(a, d)
def test_broadcast_to_scalar():
x = 5
for shape in [tuple(), (0,), (2, 3), (5, 4, 6), (2, 5, 1, 6), (3, 4, 5, 4, 6)]:
a = np.broadcast_to(x, shape)
d = broadcast_to(x, shape)
assert_eq(a, d)
def test_broadcast_to_chunks():
x = np.random.randint(10, size=(5, 1, 6))
a = from_array(x, chunks=(3, 1, 3))
for shape, chunks, expected_chunks in [
((5, 3, 6), (3, -1, 3), ((3, 2), (3,), (3, 3))),
((5, 3, 6), (3, 1, 3), ((3, 2), (1, 1, 1,), (3, 3))),
((2, 5, 3, 6), (1, 3, 1, 3), ((1, 1), (3, 2), (1, 1, 1,), (3, 3)))]:
xb = chunk.broadcast_to(x, shape)
ab = broadcast_to(a, shape, chunks=chunks)
assert_eq(xb, ab)
assert ab.chunks == expected_chunks
with pytest.raises(ValueError):
broadcast_to(a, a.shape, chunks=((2, 3), (1,), (3, 3)))
with pytest.raises(ValueError):
broadcast_to(a, a.shape, chunks=((3, 2), (3,), (3, 3)))
with pytest.raises(ValueError):
broadcast_to(a, (5, 2, 6), chunks=((3, 2), (3,), (3, 3)))
def test_broadcast_arrays():
# Calling `broadcast_arrays` with no arguments only works in NumPy 1.13.0+.
if LooseVersion(np.__version__) >= LooseVersion("1.13.0"):
assert np.broadcast_arrays() == da.broadcast_arrays()
a = np.arange(4)
d_a = da.from_array(a, chunks=tuple(s // 2 for s in a.shape))
a_0 = np.arange(4)[None, :]
a_1 = np.arange(4)[:, None]
d_a_0 = d_a[None, :]
d_a_1 = d_a[:, None]
a_r = np.broadcast_arrays(a_0, a_1)
d_r = da.broadcast_arrays(d_a_0, d_a_1)
assert isinstance(d_r, list)
assert len(a_r) == len(d_r)
for e_a_r, e_d_r in zip(a_r, d_r):
assert_eq(e_a_r, e_d_r)
@pytest.mark.parametrize('u_shape, v_shape', [
[tuple(), (2, 3)],
[(1,), (2, 3)],
[(1, 1), (2, 3)],
[(0, 3), (1, 3)],
[(2, 0), (2, 1)],
[(1, 0), (2, 1)],
[(0, 1), (1, 3)],
])
def test_broadcast_operator(u_shape, v_shape):
u = np.random.random(u_shape)
v = np.random.random(v_shape)
d_u = from_array(u, chunks=1)
d_v = from_array(v, chunks=1)
w = u * v
d_w = d_u * d_v
assert_eq(w, d_w)
@pytest.mark.parametrize('original_shape,new_shape,chunks', [
((10,), (10,), (3, 3, 4)),
((10,), (10, 1, 1), 5),
((10,), (1, 10,), 5),
((24,), (2, 3, 4), 12),
((1, 24,), (2, 3, 4), 12),
((2, 3, 4), (24,), (1, 3, 4)),
((2, 3, 4), (24,), 4),
((2, 3, 4), (24, 1), 4),
((2, 3, 4), (1, 24), 4),
((4, 4, 1), (4, 4), 2),
((4, 4), (4, 4, 1), 2),
((1, 4, 4), (4, 4), 2),
((1, 4, 4), (4, 4, 1), 2),
((1, 4, 4), (1, 1, 4, 4), 2),
((4, 4), (1, 4, 4, 1), 2),
((4, 4), (1, 4, 4), 2),
((2, 3), (2, 3), (1, 2)),
((2, 3), (3, 2), 3),
((4, 2, 3), (4, 6), 4),
((3, 4, 5, 6), (3, 4, 5, 6), (2, 3, 4, 5)),
((), (1,), 1),
((1,), (), 1),
((24,), (3, 8), 24),
((24,), (4, 6), 6),
((24,), (4, 3, 2), 6),
((24,), (4, 6, 1), 6),
((24,), (4, 6), (6, 12, 6)),
((64, 4), (8, 8, 4), (16, 2)),
((4, 64), (4, 8, 4, 2), (2, 16)),
((4, 8, 4, 2), (2, 1, 2, 32, 2), (2, 4, 2, 2)),
((4, 1, 4), (4, 4), (2, 1, 2)),
((0, 10), (0, 5, 2), (5, 5)),
((5, 0, 2), (0, 10), (5, 2, 2)),
((0,), (2, 0, 2), (4,)),
((2, 0, 2), (0,), (4, 4, 4)),
])
def test_reshape(original_shape, new_shape, chunks):
x = np.random.randint(10, size=original_shape)
a = from_array(x, chunks=chunks)
xr = x.reshape(new_shape)
ar = a.reshape(new_shape)
if a.shape == new_shape:
assert a is ar
assert_eq(xr, ar)
def test_reshape_exceptions():
x = np.random.randint(10, size=(5,))
a = from_array(x, chunks=(2,))
with pytest.raises(ValueError):
da.reshape(a, (100,))
def test_reshape_splat():
x = da.ones((5, 5), chunks=(2, 2))
assert_eq(x.reshape((25,)), x.reshape(25))
def test_reshape_fails_for_dask_only():
cases = [
((3, 4), (4, 3), 2),
]
for original_shape, new_shape, chunks in cases:
x = np.random.randint(10, size=original_shape)
a = from_array(x, chunks=chunks)
assert x.reshape(new_shape).shape == new_shape
with pytest.raises(ValueError):
da.reshape(a, new_shape)
def test_reshape_unknown_dimensions():
for original_shape in [(24,), (2, 12), (2, 3, 4)]:
for new_shape in [(-1,), (2, -1), (-1, 3, 4)]:
x = np.random.randint(10, size=original_shape)
a = from_array(x, 24)
assert_eq(x.reshape(new_shape), a.reshape(new_shape))
pytest.raises(ValueError, lambda: da.reshape(a, (-1, -1)))
def test_full():
d = da.full((3, 4), 2, chunks=((2, 1), (2, 2)))
assert d.chunks == ((2, 1), (2, 2))
assert_eq(d, np.full((3, 4), 2))
def test_map_blocks():
x = np.arange(400).reshape((20, 20))
d = from_array(x, chunks=(7, 7))
e = d.map_blocks(inc, dtype=d.dtype)
assert d.chunks == e.chunks
assert_eq(e, x + 1)
e = d.map_blocks(inc, name='increment')
assert e.name.startswith('increment-')
assert d.map_blocks(inc, name='foo').name != d.map_blocks(dec, name='foo').name
d = from_array(x, chunks=(10, 10))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=(5, 5), dtype=d.dtype)
assert e.chunks == ((5, 5), (5, 5))
assert_eq(e, x[::2, ::2])
d = from_array(x, chunks=(8, 8))
e = d.map_blocks(lambda x: x[::2, ::2], chunks=((4, 4, 2), (4, 4, 2)),
dtype=d.dtype)
assert_eq(e, x[::2, ::2])
def test_map_blocks2():
x = np.arange(10, dtype='i8')
d = from_array(x, chunks=(2,))
def func(block, block_id=None, c=0):
return np.ones_like(block) * sum(block_id) + c
out = d.map_blocks(func, dtype='i8')
expected = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4], dtype='i8')
assert_eq(out, expected)
assert same_keys(d.map_blocks(func, dtype='i8'), out)
out = d.map_blocks(func, dtype='i8', c=1)
expected = expected + 1
assert_eq(out, expected)
assert same_keys(d.map_blocks(func, dtype='i8', c=1), out)
def test_map_blocks_block_info():
x = da.arange(50, chunks=10)
def func(a, b, c, block_info=None):
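        # block_info is keyed by each array argument's position and exposes the shape,
        # chunk layout and location of the current block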
for idx in [0, 2]: # positions in args
assert block_info[idx]['shape'] == (50,)
assert block_info[idx]['num-chunks'] == (5,)
start, stop = block_info[idx]['array-location'][0]
assert stop - start == 10
assert 0 <= start <= 40
assert 10 <= stop <= 50
assert 0 <= block_info[idx]['chunk-location'][0] <= 4
return a + b + c
z = da.map_blocks(func, x, 100, x + 1, dtype=x.dtype)
assert_eq(z, x + x + 1 + 100)
def test_map_blocks_with_constants():
d = da.arange(10, chunks=3)
e = d.map_blocks(add, 100, dtype=d.dtype)
assert_eq(e, np.arange(10) + 100)
assert_eq(da.map_blocks(sub, d, 10, dtype=d.dtype),
np.arange(10) - 10)
assert_eq(da.map_blocks(sub, 10, d, dtype=d.dtype),
10 - np.arange(10))
def test_map_blocks_with_kwargs():
d = da.arange(10, chunks=5)
result = d.map_blocks(np.max, axis=0, keepdims=True, dtype=d.dtype,
chunks=(1,))
assert_eq(result, np.array([4, 9]))
def test_map_blocks_with_chunks():
dx = da.ones((5, 3), chunks=(2, 2))
dy = da.ones((5, 3), chunks=(2, 2))
dz = da.map_blocks(np.add, dx, dy, chunks=dx.chunks)
assert_eq(dz, np.ones((5, 3)) * 2)
def test_map_blocks_dtype_inference():
x = np.arange(50).reshape((5, 10))
y = np.arange(10)
dx = da.from_array(x, chunks=5)
dy = da.from_array(y, chunks=5)
def foo(x, *args, **kwargs):
cast = kwargs.pop('cast', 'i8')
return (x + sum(args)).astype(cast)
assert_eq(dx.map_blocks(foo, dy, 1), foo(dx, dy, 1))
assert_eq(dx.map_blocks(foo, dy, 1, cast='f8'), foo(dx, dy, 1, cast='f8'))
assert_eq(dx.map_blocks(foo, dy, 1, cast='f8', dtype='f8'),
foo(dx, dy, 1, cast='f8', dtype='f8'))
def foo(x):
raise RuntimeError("Woops")
try:
dx.map_blocks(foo)
except Exception as e:
assert e.args[0].startswith("`dtype` inference failed")
assert "Please specify the dtype explicitly" in e.args[0]
assert 'RuntimeError' in e.args[0]
else:
assert False, "Should have errored"
def test_from_function_requires_block_args():
x = np.arange(10)
pytest.raises(Exception, lambda: from_array(x))
def test_repr():
d = da.ones((4, 4), chunks=(2, 2))
assert key_split(d.name) in repr(d)
assert str(d.shape) in repr(d)
assert str(d.dtype) in repr(d)
d = da.ones((4000, 4), chunks=(4, 2))
assert len(str(d)) < 1000
def test_slicing_with_ellipsis():
x = np.arange(256).reshape((4, 4, 4, 4))
d = da.from_array(x, chunks=((2, 2, 2, 2)))
assert_eq(d[..., 1], x[..., 1])
assert_eq(d[0, ..., 1], x[0, ..., 1])
def test_slicing_with_ndarray():
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=((4, 4)))
assert_eq(d[np.arange(8)], x)
assert_eq(d[np.ones(8, dtype=bool)], x)
assert_eq(d[np.array([1])], x[[1]])
assert_eq(d[np.array([True, False, True] + [False] * 5)], x[[0, 2]])
def test_dtype():
d = da.ones((4, 4), chunks=(2, 2))
assert d.dtype == d.compute().dtype
assert (d * 1.0).dtype == (d + 1.0).compute().dtype
assert d.sum().dtype == d.sum().compute().dtype # no shape
def test_blockdims_from_blockshape():
assert blockdims_from_blockshape((10, 10), (4, 3)) == ((4, 4, 2), (3, 3, 3, 1))
pytest.raises(TypeError, lambda: blockdims_from_blockshape((10,), None))
assert blockdims_from_blockshape((1e2, 3), [1e1, 3]) == ((10, ) * 10, (3, ))
assert blockdims_from_blockshape((np.int8(10), ), (5, )) == ((5, 5), )
def test_coerce():
d0 = da.from_array(np.array(1), chunks=(1,))
d1 = da.from_array(np.array([1]), chunks=(1,))
with dask.config.set(scheduler='sync'):
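        # 0-d and single-element arrays must coerce to plain Python scalars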
for d in d0, d1:
assert bool(d) is True
assert int(d) == 1
assert float(d) == 1.0
assert complex(d) == complex(1)
a2 = np.arange(2)
d2 = da.from_array(a2, chunks=(2,))
for func in (int, float, complex):
        pytest.raises(TypeError, lambda: func(d2))
def test_bool():
    arr = np.arange(100).reshape((10, 10))
    darr = da.from_array(arr, chunks=(10, 10))
with pytest.raises(ValueError):
bool(darr)
bool(darr == darr)
def test_store_kwargs():
d = da.ones((10, 10), chunks=(2, 2))
a = d + 1
called = [False]
def get_func(*args, **kwargs):
assert kwargs.pop("foo") == "test kwarg"
r = dask.get(*args, **kwargs)
called[0] = True
return r
called[0] = False
at = np.zeros(shape=(10, 10))
store([a], [at], get=get_func, foo="test kwarg")
assert called[0]
called[0] = False
at = np.zeros(shape=(10, 10))
a.store(at, get=get_func, foo="test kwarg")
assert called[0]
called[0] = False
at = np.zeros(shape=(10, 10))
store([a], [at], get=get_func, return_store=True, foo="test kwarg")
assert called[0]
def test_store_delayed_target():
from dask.delayed import delayed
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
# empty buffers to be used as targets
targs = {}
def make_target(key):
a = np.empty((4, 4))
targs[key] = a
return a
# delayed calls to these targets
atd = delayed(make_target)('at')
btd = delayed(make_target)('bt')
# test not keeping result
st = store([a, b], [atd, btd])
at = targs['at']
bt = targs['bt']
assert st is None
assert_eq(at, a)
assert_eq(bt, b)
# test keeping result
for st_compute in [False, True]:
targs.clear()
st = store([a, b], [atd, btd], return_stored=True, compute=st_compute)
if st_compute:
assert all(
not any(dask.core.get_deps(e.dask)[0].values()) for e in st
)
st = dask.compute(*st)
at = targs['at']
bt = targs['bt']
assert st is not None
assert isinstance(st, tuple)
assert all([isinstance(v, np.ndarray) for v in st])
assert_eq(at, a)
assert_eq(bt, b)
assert_eq(st[0], a)
assert_eq(st[1], b)
pytest.raises(ValueError, lambda: store([a], [at, bt]))
pytest.raises(ValueError, lambda: store(at, at))
pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_store():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.empty(shape=(4, 4))
bt = np.empty(shape=(4, 4))
st = store([a, b], [at, bt])
assert st is None
assert (at == 2).all()
assert (bt == 3).all()
pytest.raises(ValueError, lambda: store([a], [at, bt]))
pytest.raises(ValueError, lambda: store(at, at))
pytest.raises(ValueError, lambda: store([at, bt], [at, bt]))
def test_store_regions():
d = da.ones((4, 4, 4), dtype=int, chunks=(2, 2, 2))
a, b = d + 1, d + 2
a = a[:, 1:, :].astype(float)
region = (slice(None, None, 2), slice(None), [1, 2, 4, 5])
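    # the ``regions`` argument restricts writes to these slices/indices of the target arrays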
# Single region:
at = np.zeros(shape=(8, 3, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store([a, b], [at, bt], regions=region, compute=False)
assert isinstance(v, Delayed)
assert (at == 0).all() and (bt[region] == 0).all()
assert all([ev is None for ev in v.compute()])
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not ( bt == 0 ).all()
assert not (at == 2).all() and not ( at == 0 ).all()
# Multiple regions:
at = np.zeros(shape=(8, 3, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store([a, b], [at, bt], regions=[region, region], compute=False)
assert isinstance(v, Delayed)
assert (at == 0).all() and (bt[region] == 0).all()
assert all([ev is None for ev in v.compute()])
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not ( bt == 0 ).all()
assert not (at == 2).all() and not ( at == 0 ).all()
# Single region (keep result):
for st_compute in [False, True]:
at = np.zeros(shape=(8, 3, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store(
[a, b], [at, bt], regions=region,
compute=st_compute, return_stored=True
)
assert isinstance(v, tuple)
assert all([isinstance(e, da.Array) for e in v])
if st_compute:
assert all(
not any(dask.core.get_deps(e.dask)[0].values()) for e in v
)
else:
assert (at == 0).all() and (bt[region] == 0).all()
ar, br = v
assert ar.dtype == a.dtype
assert br.dtype == b.dtype
assert ar.shape == a.shape
assert br.shape == b.shape
assert ar.chunks == a.chunks
assert br.chunks == b.chunks
ar, br = da.compute(ar, br)
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not ( bt == 0 ).all()
assert not (at == 2).all() and not ( at == 0 ).all()
assert (br == 3).all()
assert (ar == 2).all()
# Multiple regions (keep result):
for st_compute in [False, True]:
at = np.zeros(shape=(8, 3, 6))
bt = np.zeros(shape=(8, 4, 6))
v = store(
[a, b], [at, bt], regions=[region, region],
compute=st_compute, return_stored=True
)
assert isinstance(v, tuple)
assert all([isinstance(e, da.Array) for e in v])
if st_compute:
assert all(
not any(dask.core.get_deps(e.dask)[0].values()) for e in v
)
else:
assert (at == 0).all() and (bt[region] == 0).all()
ar, br = v
assert ar.dtype == a.dtype
assert br.dtype == b.dtype
assert ar.shape == a.shape
assert br.shape == b.shape
assert ar.chunks == a.chunks
assert br.chunks == b.chunks
ar, br = da.compute(ar, br)
assert (at[region] == 2).all() and (bt[region] == 3).all()
assert not (bt == 3).all() and not ( bt == 0 ).all()
assert not (at == 2).all() and not ( at == 0 ).all()
assert (br == 3).all()
assert (ar == 2).all()
def test_store_compute_false():
d = da.ones((4, 4), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.zeros(shape=(4, 4))
bt = np.zeros(shape=(4, 4))
v = store([a, b], [at, bt], compute=False)
assert isinstance(v, Delayed)
assert (at == 0).all() and (bt == 0).all()
assert all([ev is None for ev in v.compute()])
assert (at == 2).all() and (bt == 3).all()
at = np.zeros(shape=(4, 4))
bt = np.zeros(shape=(4, 4))
dat, dbt = store([a, b], [at, bt], compute=False, return_stored=True)
assert isinstance(dat, Array) and isinstance(dbt, Array)
assert (at == 0).all() and (bt == 0).all()
assert (dat.compute() == at).all() and (dbt.compute() == bt).all()
assert (at == 2).all() and (bt == 3).all()
def test_store_nocompute_regions():
x = da.ones(10, chunks=1)
y = np.zeros((2, 10))
d1 = da.store(x, y, regions=(0,), compute=False)
d2 = da.store(x, y, regions=(1,), compute=False)
assert d1.key != d2.key
class ThreadSafetyError(Exception):
pass
class NonthreadSafeStore(object):
def __init__(self):
self.in_use = False
def __setitem__(self, key, value):
if self.in_use:
raise ThreadSafetyError()
self.in_use = True
time.sleep(0.001)
self.in_use = False
class ThreadSafeStore(object):
def __init__(self):
self.concurrent_uses = 0
self.max_concurrent_uses = 0
def __setitem__(self, key, value):
self.concurrent_uses += 1
self.max_concurrent_uses = max(self.concurrent_uses, self.max_concurrent_uses)
time.sleep(0.01)
self.concurrent_uses -= 1
class CounterLock(object):
def __init__(self, *args, **kwargs):
self.lock = Lock(*args, **kwargs)
self.acquire_count = 0
self.release_count = 0
def acquire(self, *args, **kwargs):
self.acquire_count += 1
return self.lock.acquire(*args, **kwargs)
def release(self, *args, **kwargs):
self.release_count += 1
return self.lock.release(*args, **kwargs)
def test_store_locks():
_Lock = type(Lock())
d = da.ones((10, 10), chunks=(2, 2))
a, b = d + 1, d + 2
at = np.zeros(shape=(10, 10))
bt = np.zeros(shape=(10, 10))
lock = Lock()
v = store([a, b], [at, bt], compute=False, lock=lock)
assert isinstance(v, Delayed)
dsk = v.dask
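    # every store task in the graph should carry the single lock instance that was passed in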
locks = set(vv for v in dsk.values() for vv in v if isinstance(vv, _Lock))
assert locks == set([lock])
# Ensure same lock applies over multiple stores
at = NonthreadSafeStore()
v = store([a, b], [at, at], lock=lock,
scheduler='threads', num_workers=10)
assert v is None
# Don't assume thread safety by default
at = NonthreadSafeStore()
assert store(a, at, scheduler='threads', num_workers=10) is None
assert a.store(at, scheduler='threads', num_workers=10) is None
# Ensure locks can be removed
at = ThreadSafeStore()
for i in range(10):
st = a.store(at, lock=False, scheduler='threads', num_workers=10)
assert st is None
if at.max_concurrent_uses > 1:
break
if i == 9:
assert False
# Verify number of lock calls
nchunks = np.sum([np.prod([len(c) for c in e.chunks]) for e in [a, b]])
for c in (False, True):
at = np.zeros(shape=(10, 10))
bt = np.zeros(shape=(10, 10))
lock = CounterLock()
v = store([a, b], [at, bt], lock=lock, compute=c, return_stored=True)
assert all(isinstance(e, Array) for e in v)
da.compute(v)
# When `return_stored=True` and `compute=False`,
# the lock should be acquired only once for store and load steps
# as they are fused together into one step.
assert lock.acquire_count == lock.release_count
if c:
assert lock.acquire_count == 2 * nchunks
else:
assert lock.acquire_count == nchunks
def test_store_method_return():
d = da.ones((10, 10), chunks=(2, 2))
a = d + 1
for compute in [False, True]:
for return_stored in [False, True]:
at = np.zeros(shape=(10, 10))
r = a.store(
at, scheduler='threads',
compute=compute, return_stored=return_stored
)
if return_stored:
assert isinstance(r, Array)
elif compute:
assert r is None
else:
assert isinstance(r, Delayed)
@pytest.mark.xfail(reason="can't lock with multiprocessing")
def test_store_multiprocessing_lock():
d = da.ones((10, 10), chunks=(2, 2))
a = d + 1
at = np.zeros(shape=(10, 10))
st = a.store(at, scheduler='processes', num_workers=10)
assert st is None
def test_to_hdf5():
h5py = pytest.importorskip('h5py')
x = da.ones((4, 4), chunks=(2, 2))
y = da.ones(4, chunks=2, dtype='i4')
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x')
with h5py.File(fn) as f:
d = f['/x']
assert_eq(d[:], x)
assert d.chunks == (2, 2)
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x', chunks=None)
with h5py.File(fn) as f:
d = f['/x']
assert_eq(d[:], x)
assert d.chunks is None
with tmpfile('.hdf5') as fn:
x.to_hdf5(fn, '/x', chunks=(1, 1))
with h5py.File(fn) as f:
d = f['/x']
assert_eq(d[:], x)
assert d.chunks == (1, 1)
with tmpfile('.hdf5') as fn:
da.to_hdf5(fn, {'/x': x, '/y': y})
with h5py.File(fn) as f:
assert_eq(f['/x'][:], x)
assert f['/x'].chunks == (2, 2)
assert_eq(f['/y'][:], y)
assert f['/y'].chunks == (2,)
def test_to_dask_dataframe():
dd = pytest.importorskip('dask.dataframe')
a = da.ones((4,), chunks=(2,))
d = a.to_dask_dataframe()
assert isinstance(d, dd.Series)
a = da.ones((4, 4), chunks=(2, 2))
d = a.to_dask_dataframe()
assert isinstance(d, dd.DataFrame)
def test_np_array_with_zero_dimensions():
d = da.ones((4, 4), chunks=(2, 2))
assert_eq(np.array(d.sum()), np.array(d.compute().sum()))
def test_dtype_complex():
x = np.arange(24).reshape((4, 6)).astype('f4')
y = np.arange(24).reshape((4, 6)).astype('i8')
z = np.arange(24).reshape((4, 6)).astype('i2')
a = da.from_array(x, chunks=(2, 3))
b = da.from_array(y, chunks=(2, 3))
c = da.from_array(z, chunks=(2, 3))
    def assert_eq(a, b):
        # Local override for dtype checks: assert instead of returning a bool,
        # otherwise all of the comparisons below would be silent no-ops.
        assert isinstance(a, np.dtype)
        assert isinstance(b, np.dtype)
        assert str(a) == str(b)
assert_eq(a.dtype, x.dtype)
assert_eq(b.dtype, y.dtype)
assert_eq((a + 1).dtype, (x + 1).dtype)
assert_eq((a + b).dtype, (x + y).dtype)
assert_eq(a.T.dtype, x.T.dtype)
assert_eq(a[:3].dtype, x[:3].dtype)
assert_eq((a.dot(b.T)).dtype, (x.dot(y.T)).dtype)
assert_eq(stack([a, b]).dtype, np.vstack([x, y]).dtype)
assert_eq(concatenate([a, b]).dtype, np.concatenate([x, y]).dtype)
assert_eq(b.std().dtype, y.std().dtype)
assert_eq(c.sum().dtype, z.sum().dtype)
    assert_eq(a.min().dtype, x.min().dtype)
    assert_eq(b.std().dtype, y.std().dtype)
    assert_eq(a.argmin(axis=0).dtype, x.argmin(axis=0).dtype)
assert_eq(da.sin(c).dtype, np.sin(z).dtype)
assert_eq(da.exp(b).dtype, np.exp(y).dtype)
assert_eq(da.floor(a).dtype, np.floor(x).dtype)
assert_eq(da.isnan(b).dtype, np.isnan(y).dtype)
with ignoring(ImportError):
assert da.isnull(b).dtype == 'bool'
assert da.notnull(b).dtype == 'bool'
x = np.array([('a', 1)], dtype=[('text', 'S1'), ('numbers', 'i4')])
d = da.from_array(x, chunks=(1,))
assert_eq(d['text'].dtype, x['text'].dtype)
assert_eq(d[['numbers', 'text']].dtype, x[['numbers', 'text']].dtype)
def test_astype():
x = np.ones((5, 5), dtype='f8')
    d = da.from_array(x, chunks=(2, 2))
assert d.astype('i8').dtype == 'i8'
assert_eq(d.astype('i8'), x.astype('i8'))
assert same_keys(d.astype('i8'), d.astype('i8'))
with pytest.raises(TypeError):
d.astype('i8', casting='safe')
with pytest.raises(TypeError):
d.astype('i8', not_a_real_kwarg='foo')
# smoketest with kwargs
assert_eq(d.astype('i8', copy=False), x.astype('i8', copy=False))
# Check it's a noop
assert d.astype('f8') is d
def test_arithmetic():
x = np.arange(5).astype('f4') + 2
y = np.arange(5).astype('i8') + 2
z = np.arange(5).astype('i4') + 2
a = da.from_array(x, chunks=(2,))
b = da.from_array(y, chunks=(2,))
c = da.from_array(z, chunks=(2,))
assert_eq(a + b, x + y)
assert_eq(a * b, x * y)
assert_eq(a - b, x - y)
assert_eq(a / b, x / y)
assert_eq(b & b, y & y)
assert_eq(b | b, y | y)
assert_eq(b ^ b, y ^ y)
assert_eq(a // b, x // y)
assert_eq(a ** b, x ** y)
assert_eq(a % b, x % y)
assert_eq(a > b, x > y)
assert_eq(a < b, x < y)
assert_eq(a >= b, x >= y)
assert_eq(a <= b, x <= y)
assert_eq(a == b, x == y)
assert_eq(a != b, x != y)
assert_eq(a + 2, x + 2)
assert_eq(a * 2, x * 2)
assert_eq(a - 2, x - 2)
assert_eq(a / 2, x / 2)
assert_eq(b & True, y & True)
assert_eq(b | True, y | True)
assert_eq(b ^ True, y ^ True)
assert_eq(a // 2, x // 2)
assert_eq(a ** 2, x ** 2)
assert_eq(a % 2, x % 2)
assert_eq(a > 2, x > 2)
assert_eq(a < 2, x < 2)
assert_eq(a >= 2, x >= 2)
assert_eq(a <= 2, x <= 2)
assert_eq(a == 2, x == 2)
assert_eq(a != 2, x != 2)
assert_eq(2 + b, 2 + y)
assert_eq(2 * b, 2 * y)
assert_eq(2 - b, 2 - y)
assert_eq(2 / b, 2 / y)
assert_eq(True & b, True & y)
assert_eq(True | b, True | y)
assert_eq(True ^ b, True ^ y)
assert_eq(2 // b, 2 // y)
assert_eq(2 ** b, 2 ** y)
assert_eq(2 % b, 2 % y)
assert_eq(2 > b, 2 > y)
assert_eq(2 < b, 2 < y)
assert_eq(2 >= b, 2 >= y)
assert_eq(2 <= b, 2 <= y)
assert_eq(2 == b, 2 == y)
assert_eq(2 != b, 2 != y)
assert_eq(-a, -x)
assert_eq(abs(a), abs(x))
assert_eq(~(a == b), ~(x == y))
assert_eq(~(a == b), ~(x == y))
assert_eq(da.logaddexp(a, b), np.logaddexp(x, y))
assert_eq(da.logaddexp2(a, b), np.logaddexp2(x, y))
with pytest.warns(None): # Overflow warning
assert_eq(da.exp(b), np.exp(y))
assert_eq(da.log(a), np.log(x))
assert_eq(da.log10(a), np.log10(x))
assert_eq(da.log1p(a), np.log1p(x))
with pytest.warns(None): # Overflow warning
assert_eq(da.expm1(b), np.expm1(y))
assert_eq(da.sqrt(a), np.sqrt(x))
assert_eq(da.square(a), np.square(x))
assert_eq(da.sin(a), np.sin(x))
assert_eq(da.cos(b), np.cos(y))
assert_eq(da.tan(a), np.tan(x))
assert_eq(da.arcsin(b / 10), np.arcsin(y / 10))
assert_eq(da.arccos(b / 10), np.arccos(y / 10))
assert_eq(da.arctan(b / 10), np.arctan(y / 10))
assert_eq(da.arctan2(b * 10, a), np.arctan2(y * 10, x))
assert_eq(da.hypot(b, a), np.hypot(y, x))
assert_eq(da.sinh(a), np.sinh(x))
with pytest.warns(None): # Overflow warning
assert_eq(da.cosh(b), np.cosh(y))
assert_eq(da.tanh(a), np.tanh(x))
assert_eq(da.arcsinh(b * 10), np.arcsinh(y * 10))
assert_eq(da.arccosh(b * 10), np.arccosh(y * 10))
assert_eq(da.arctanh(b / 10), np.arctanh(y / 10))
assert_eq(da.deg2rad(a), np.deg2rad(x))
assert_eq(da.rad2deg(a), np.rad2deg(x))
assert_eq(da.logical_and(a < 1, b < 4), np.logical_and(x < 1, y < 4))
assert_eq(da.logical_or(a < 1, b < 4), np.logical_or(x < 1, y < 4))
assert_eq(da.logical_xor(a < 1, b < 4), np.logical_xor(x < 1, y < 4))
assert_eq(da.logical_not(a < 1), np.logical_not(x < 1))
    assert_eq(da.maximum(a, 5 - a), np.maximum(x, 5 - x))
    assert_eq(da.minimum(a, 5 - a), np.minimum(x, 5 - x))
    assert_eq(da.fmax(a, 5 - a), np.fmax(x, 5 - x))
    assert_eq(da.fmin(a, 5 - a), np.fmin(x, 5 - x))
assert_eq(da.isreal(a + 1j * b), np.isreal(x + 1j * y))
assert_eq(da.iscomplex(a + 1j * b), np.iscomplex(x + 1j * y))
assert_eq(da.isfinite(a), np.isfinite(x))
assert_eq(da.isinf(a), np.isinf(x))
assert_eq(da.isnan(a), np.isnan(x))
assert_eq(da.signbit(a - 3), np.signbit(x - 3))
assert_eq(da.copysign(a - 3, b), np.copysign(x - 3, y))
assert_eq(da.nextafter(a - 3, b), np.nextafter(x - 3, y))
with pytest.warns(None): # overflow warning
assert_eq(da.ldexp(c, c), np.ldexp(z, z))
assert_eq(da.fmod(a * 12, b), np.fmod(x * 12, y))
assert_eq(da.floor(a * 0.5), np.floor(x * 0.5))
assert_eq(da.ceil(a), np.ceil(x))
assert_eq(da.trunc(a / 2), np.trunc(x / 2))
assert_eq(da.degrees(b), np.degrees(y))
assert_eq(da.radians(a), np.radians(x))
assert_eq(da.rint(a + 0.3), np.rint(x + 0.3))
assert_eq(da.fix(a - 2.5), np.fix(x - 2.5))
assert_eq(da.angle(a + 1j), np.angle(x + 1j))
assert_eq(da.real(a + 1j), np.real(x + 1j))
assert_eq((a + 1j).real, np.real(x + 1j))
assert_eq(da.imag(a + 1j), np.imag(x + 1j))
assert_eq((a + 1j).imag, np.imag(x + 1j))
assert_eq(da.conj(a + 1j * b), np.conj(x + 1j * y))
assert_eq((a + 1j * b).conj(), (x + 1j * y).conj())
assert_eq(da.clip(b, 1, 4), np.clip(y, 1, 4))
assert_eq(b.clip(1, 4), y.clip(1, 4))
assert_eq(da.fabs(b), np.fabs(y))
assert_eq(da.sign(b - 2), np.sign(y - 2))
assert_eq(da.absolute(b - 2), np.absolute(y - 2))
assert_eq(da.absolute(b - 2 + 1j), np.absolute(y - 2 + 1j))
l1, l2 = da.frexp(a)
r1, r2 = np.frexp(x)
assert_eq(l1, r1)
assert_eq(l2, r2)
l1, l2 = da.modf(a)
r1, r2 = np.modf(x)
assert_eq(l1, r1)
assert_eq(l2, r2)
assert_eq(da.around(a, -1), np.around(x, -1))
def test_elemwise_consistent_names():
a = da.from_array(np.arange(5, dtype='f4'), chunks=(2,))
b = da.from_array(np.arange(5, dtype='f4'), chunks=(2,))
assert same_keys(a + b, a + b)
assert same_keys(a + 2, a + 2)
assert same_keys(da.exp(a), da.exp(a))
assert same_keys(da.exp(a, dtype='f8'), da.exp(a, dtype='f8'))
assert same_keys(da.maximum(a, b), da.maximum(a, b))
def test_optimize():
x = np.arange(5).astype('f4')
a = da.from_array(x, chunks=(2,))
expr = a[1:4] + 1
result = optimize(expr.dask, expr.__dask_keys__())
assert isinstance(result, dict)
assert all(key in result for key in expr.__dask_keys__())
def test_slicing_with_non_ndarrays():
class ARangeSlice(object):
def __init__(self, start, stop):
self.start = start
self.stop = stop
def __array__(self):
return np.arange(self.start, self.stop)
class ARangeSlicable(object):
dtype = np.dtype('i8')
def __init__(self, n):
self.n = n
@property
def shape(self):
return (self.n,)
def __getitem__(self, key):
return ARangeSlice(key[0].start, key[0].stop)
x = da.from_array(ARangeSlicable(10), chunks=(4,))
assert_eq((x + 1).sum(), (np.arange(10, dtype=x.dtype) + 1).sum())
def test_getter():
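    # getter should return an ndarray even for np.matrix input, unless asarray=False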
assert type(getter(np.matrix([[1]]), 0)) is np.ndarray
assert type(getter(np.matrix([[1]]), 0, asarray=False)) is np.matrix
assert_eq(getter([1, 2, 3, 4, 5], slice(1, 4)), np.array([2, 3, 4]))
assert_eq(getter(np.arange(5), (None, slice(None, None))),
np.arange(5)[None, :])
def test_size():
x = da.ones((10, 2), chunks=(3, 1))
assert x.size == np.array(x).size
assert isinstance(x.size, int)
def test_nbytes():
x = da.ones((10, 2), chunks=(3, 1))
assert x.nbytes == np.array(x).nbytes
def test_itemsize():
x = da.ones((10, 2), chunks=(3, 1))
assert x.itemsize == 8
def test_Array_normalizes_dtype():
x = da.ones((3,), chunks=(1,), dtype=int)
assert isinstance(x.dtype, np.dtype)
def test_from_array_with_lock():
x = np.arange(10)
d = da.from_array(x, chunks=5, lock=True)
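    # with a lock, each getter task embeds the lock as its fifth element and all tasks share it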
tasks = [v for k, v in d.dask.items() if k[0] == d.name]
assert hasattr(tasks[0][4], 'acquire')
assert len(set(task[4] for task in tasks)) == 1
assert_eq(d, x)
lock = Lock()
e = da.from_array(x, chunks=5, lock=lock)
f = da.from_array(x, chunks=5, lock=lock)
assert_eq(e + f, x + x)
class MyArray(object):
def __init__(self, x):
self.x = x
self.dtype = x.dtype
self.shape = x.shape
self.ndim = len(x.shape)
def __getitem__(self, i):
return self.x[i]
@pytest.mark.parametrize('x,chunks', [
(np.arange(25).reshape((5, 5)), (5, 5)),
(np.arange(25).reshape((5, 5)), -1),
(np.array([[1]]), 1),
(np.array(1), 1),
])
def test_from_array_tasks_always_call_getter(x, chunks):
dx = da.from_array(MyArray(x), chunks=chunks, asarray=False)
assert_eq(x, dx)
def test_from_array_ndarray_onechunk():
"""ndarray with a single chunk produces a minimal single key dict
"""
x = np.array([[1, 2], [3, 4]])
dx = da.from_array(x, chunks=-1)
assert_eq(x, dx)
assert len(dx.dask) == 1
assert dx.dask[dx.name, 0, 0] is x
def test_from_array_ndarray_getitem():
"""For ndarray, don't use getter / getter_nofancy; use the cleaner
operator.getitem"""
x = np.array([[1, 2], [3, 4]])
dx = da.from_array(x, chunks=(1, 2))
assert_eq(x, dx)
assert dx.dask[dx.name, 0, 0][0] == operator.getitem
@pytest.mark.parametrize(
'x', [[1, 2], (1, 2), memoryview(b'abc')] +
([buffer(b'abc')] if sys.version_info[0] == 2 else [])) # noqa: F821
def test_from_array_list(x):
"""Lists, tuples, and memoryviews are automatically converted to ndarray
"""
dx = da.from_array(x, chunks=-1)
assert_eq(np.array(x), dx)
assert isinstance(dx.dask[dx.name, 0], np.ndarray)
dx = da.from_array(x, chunks=1)
assert_eq(np.array(x), dx)
assert dx.dask[dx.name, 0][0] == operator.getitem
assert isinstance(dx.dask[dx.name.replace('array', 'array-original')],
np.ndarray)
@pytest.mark.parametrize(
'type_', [t for t in np.ScalarType if t not in [memoryview] +
([buffer] if sys.version_info[0] == 2 else [])]) # noqa: F821
def test_from_array_scalar(type_):
"""Python and numpy scalars are automatically converted to ndarray
"""
if type_ == np.datetime64:
x = np.datetime64('2000-01-01')
else:
x = type_(1)
dx = da.from_array(x, chunks=-1)
assert_eq(np.array(x), dx)
assert isinstance(dx.dask[dx.name, ], np.ndarray)
@pytest.mark.parametrize('asarray,cls', [
(True, np.ndarray),
(False, np.matrix),
])
def test_from_array_no_asarray(asarray, cls):
def assert_chunks_are_of_type(x):
chunks = compute_as_if_collection(Array, x.dask, x.__dask_keys__())
for c in concat(chunks):
assert type(c) is cls
x = np.matrix(np.arange(100).reshape((10, 10)))
dx = da.from_array(x, chunks=(5, 5), asarray=asarray)
assert_chunks_are_of_type(dx)
assert_chunks_are_of_type(dx[0:5])
assert_chunks_are_of_type(dx[0:5][:, 0])
def test_from_array_getitem():
x = np.arange(10)
def my_getitem(x, ind):
return x[ind]
y = da.from_array(x, chunks=(5,), getitem=my_getitem)
for k, v in y.dask.items():
if isinstance(v, tuple):
assert v[0] is my_getitem
assert_eq(x, y)
def test_from_array_minus_one():
x = np.arange(10)
y = da.from_array(x, -1)
assert y.chunks == ((10,),)
assert_eq(x, y)
def test_from_array_copy():
# Regression test for https://github.com/dask/dask/issues/3751
x = np.arange(10)
y = da.from_array(x, -1)
assert y.npartitions == 1
y_c = y.copy()
assert y is not y_c
assert y.compute() is not y_c.compute()
def test_asarray():
assert_eq(da.asarray([1, 2, 3]), np.asarray([1, 2, 3]))
x = da.asarray([1, 2, 3])
assert da.asarray(x) is x
def test_asarray_h5py():
h5py = pytest.importorskip('h5py')
with tmpfile('.hdf5') as fn:
with h5py.File(fn) as f:
d = f.create_dataset('/x', shape=(2, 2), dtype=float)
x = da.asarray(d)
assert d in x.dask.values()
assert not any(isinstance(v, np.ndarray) for v in x.dask.values())
def test_asanyarray():
x = np.matrix([1, 2, 3])
dx = da.asanyarray(x)
assert dx.numblocks == (1, 1)
chunks = compute_as_if_collection(Array, dx.dask, dx.__dask_keys__())
assert isinstance(chunks[0][0], np.matrix)
assert da.asanyarray(dx) is dx
def test_from_func():
x = np.arange(10)
f = lambda n: n * x
d = from_func(f, (10,), x.dtype, kwargs={'n': 2})
assert d.shape == x.shape
assert d.dtype == x.dtype
assert_eq(d.compute(), 2 * x)
assert same_keys(d, from_func(f, (10,), x.dtype, kwargs={'n': 2}))
def test_concatenate3_2():
x = np.array([1, 2])
assert_eq(concatenate3([x, x, x]),
np.array([1, 2, 1, 2, 1, 2]))
x = np.array([[1, 2]])
assert (concatenate3([[x, x, x], [x, x, x]]) ==
np.array([[1, 2, 1, 2, 1, 2],
[1, 2, 1, 2, 1, 2]])).all()
assert (concatenate3([[x, x], [x, x], [x, x]]) ==
np.array([[1, 2, 1, 2],
[1, 2, 1, 2],
[1, 2, 1, 2]])).all()
x = np.arange(12).reshape((2, 2, 3))
assert_eq(concatenate3([[[x, x, x], [x, x, x]],
[[x, x, x], [x, x, x]]]),
np.array([[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]],
[[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5],
[ 0, 1, 2, 0, 1, 2, 0, 1, 2],
[ 3, 4, 5, 3, 4, 5, 3, 4, 5]],
[[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11],
[ 6, 7, 8, 6, 7, 8, 6, 7, 8],
[ 9, 10, 11, 9, 10, 11, 9, 10, 11]]]))
def test_map_blocks3():
x = np.arange(10)
y = np.arange(10) * 2
d = da.from_array(x, chunks=5)
e = da.from_array(y, chunks=5)
assert_eq(da.core.map_blocks(lambda a, b: a + 2 * b, d, e, dtype=d.dtype),
x + 2 * y)
z = np.arange(100).reshape((10, 10))
f = da.from_array(z, chunks=5)
func = lambda a, b: a + 2 * b
res = da.core.map_blocks(func, d, f, dtype=d.dtype)
assert_eq(res, x + 2 * z)
assert same_keys(da.core.map_blocks(func, d, f, dtype=d.dtype), res)
assert_eq(da.map_blocks(func, f, d, dtype=d.dtype), z + 2 * x)
def test_from_array_with_missing_chunks():
x = np.random.randn(2, 4, 3)
d = da.from_array(x, chunks=(None, 2, None))
assert d.chunks == da.from_array(x, chunks=(2, 2, 3)).chunks
def test_normalize_chunks():
assert normalize_chunks(3, (4, 6)) == ((3, 1), (3, 3))
assert normalize_chunks(((3, 3), (8,)), (6, 8)) == ((3, 3), (8, ))
assert normalize_chunks((4, 5), (9,)) == ((4, 5), )
assert normalize_chunks((4, 5), (9, 9)) == ((4, 4, 1), (5, 4))
assert normalize_chunks(-1, (5, 5)) == ((5,), (5, ))
assert normalize_chunks((3, -1), (5, 5)) == ((3, 2), (5, ))
assert normalize_chunks({0: 3}, (5, 5)) == ((3, 2), (5,))
assert normalize_chunks([[2, 2], [3, 3]]) == ((2, 2), (3, 3))
    assert normalize_chunks(10, (30, 5)) == ((10, 10, 10), (5,))
    assert normalize_chunks((), (0, 0)) == ((0,), (0,))
    assert normalize_chunks(-1, (0, 3)) == ((0,), (3,))
assert normalize_chunks("auto", shape=(20,), limit=5, dtype='uint8') == \
((5, 5, 5, 5),)
with pytest.raises(ValueError):
normalize_chunks(((10,), ), (11, ))
with pytest.raises(ValueError):
normalize_chunks(((5, ), (5, )), (5, ))
def test_align_chunks_to_previous_chunks():
chunks = normalize_chunks('auto',
shape=(2000,),
previous_chunks=(512,),
limit='600 B', dtype=np.uint8)
assert chunks == ((512, 512, 512, 2000 - 512 * 3),)
chunks = normalize_chunks('auto',
shape=(2000,),
previous_chunks=(128,),
limit='600 B', dtype=np.uint8)
assert chunks == ((512, 512, 512, 2000 - 512 * 3),)
chunks = normalize_chunks('auto',
shape=(2000,),
previous_chunks=(512,),
limit='1200 B', dtype=np.uint8)
assert chunks == ((1024, 2000 - 1024),)
chunks = normalize_chunks('auto',
shape=(3, 10211, 10376),
previous_chunks=(1, 512, 512),
limit='1MiB', dtype=np.float32)
assert chunks[0] == (1, 1, 1)
assert all(c % 512 == 0 for c in chunks[1][:-1])
assert all(c % 512 == 0 for c in chunks[2][:-1])
def test_raise_on_no_chunks():
x = da.ones(6, chunks=3)
try:
Array(x.dask, x.name, chunks=None, dtype=x.dtype, shape=None)
assert False
except ValueError as e:
assert "dask.pydata.org" in str(e)
pytest.raises(ValueError, lambda: da.ones(6))
def test_chunks_is_immutable():
x = da.ones(6, chunks=3)
try:
x.chunks = 2
assert False
except TypeError as e:
assert 'rechunk(2)' in str(e)
def test_raise_on_bad_kwargs():
x = da.ones(5, chunks=3)
try:
da.minimum(x, foo=None)
except TypeError as e:
assert 'minimum' in str(e)
assert 'foo' in str(e)
def test_long_slice():
x = np.arange(10000)
d = da.from_array(x, chunks=1)
assert_eq(d[8000:8200], x[8000:8200])
def test_h5py_newaxis():
h5py = pytest.importorskip('h5py')
with tmpfile('h5') as fn:
with h5py.File(fn) as f:
x = f.create_dataset('/x', shape=(10, 10), dtype='f8')
d = da.from_array(x, chunks=(5, 5))
assert d[None, :, :].compute(scheduler='sync').shape == (1, 10, 10)
assert d[:, None, :].compute(scheduler='sync').shape == (10, 1, 10)
assert d[:, :, None].compute(scheduler='sync').shape == (10, 10, 1)
assert same_keys(d[:, :, None], d[:, :, None])
def test_ellipsis_slicing():
assert_eq(da.ones(4, chunks=2)[...], np.ones(4))
def test_point_slicing():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
result = d.vindex[[1, 2, 5, 5], [3, 1, 6, 1]]
assert_eq(result, x[[1, 2, 5, 5], [3, 1, 6, 1]])
result = d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]]
assert_eq(result, x[[0, 1, 6, 0], [0, 1, 0, 7]])
assert same_keys(result, d.vindex[[0, 1, 6, 0], [0, 1, 0, 7]])
def test_point_slicing_with_full_slice():
from dask.array.core import _vindex_transpose, _get_axis
x = np.arange(4 * 5 * 6 * 7).reshape((4, 5, 6, 7))
d = da.from_array(x, chunks=(2, 3, 3, 4))
inds = [[[1, 2, 3], None, [3, 2, 1], [5, 3, 4]],
[[1, 2, 3], None, [4, 3, 2], None],
[[1, 2, 3], [3, 2, 1]],
[[1, 2, 3], [3, 2, 1], [3, 2, 1], [5, 3, 4]],
[[], [], [], None],
[np.array([1, 2, 3]), None, np.array([4, 3, 2]), None],
[None, None, [1, 2, 3], [4, 3, 2]],
[None, [0, 2, 3], None, [0, 3, 2]]]
for ind in inds:
slc = [i if isinstance(i, (np.ndarray, list)) else slice(None, None)
for i in ind]
result = d.vindex[tuple(slc)]
# Rotate the expected result accordingly
axis = _get_axis(ind)
expected = _vindex_transpose(x[tuple(slc)], axis)
assert_eq(result, expected)
# Always have the first axis be the length of the points
k = len(next(i for i in ind if isinstance(i, (np.ndarray, list))))
assert result.shape[0] == k
def test_slice_with_floats():
d = da.ones((5,), chunks=(3,))
with pytest.raises(IndexError):
d[1.5]
with pytest.raises(IndexError):
d[0:1.5]
with pytest.raises(IndexError):
d[[1, 1.5]]
def test_slice_with_integer_types():
x = np.arange(10)
dx = da.from_array(x, chunks=5)
inds = np.array([0, 3, 6], dtype='u8')
assert_eq(dx[inds], x[inds])
assert_eq(dx[inds.astype('u4')], x[inds.astype('u4')])
inds = np.array([0, 3, 6], dtype=np.int64)
assert_eq(dx[inds], x[inds])
assert_eq(dx[inds.astype('u4')], x[inds.astype('u4')])
def test_index_with_integer_types():
x = np.arange(10)
dx = da.from_array(x, chunks=5)
inds = int(3)
assert_eq(dx[inds], x[inds])
inds = np.int64(3)
assert_eq(dx[inds], x[inds])
def test_vindex_basic():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
# cases where basic and advanced indexing coincide
result = d.vindex[0]
assert_eq(result, x[0])
result = d.vindex[0, 1]
assert_eq(result, x[0, 1])
result = d.vindex[[0, 1], ::-1] # slices last
assert_eq(result, x[:2, ::-1])
def test_vindex_nd():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(3, 4))
result = d.vindex[[[0, 1], [6, 0]], [[0, 1], [0, 7]]]
assert_eq(result, x[[[0, 1], [6, 0]], [[0, 1], [0, 7]]])
result = d.vindex[np.arange(7)[:, None], np.arange(8)[None, :]]
assert_eq(result, x)
result = d.vindex[np.arange(7)[None, :], np.arange(8)[:, None]]
assert_eq(result, x.T)
def test_vindex_negative():
x = np.arange(10)
d = da.from_array(x, chunks=(5, 5))
result = d.vindex[np.array([0, -1])]
assert_eq(result, x[np.array([0, -1])])
def test_vindex_errors():
d = da.ones((5, 5, 5), chunks=(3, 3, 3))
pytest.raises(IndexError, lambda: d.vindex[np.newaxis])
pytest.raises(IndexError, lambda: d.vindex[[1, 2], [1, 2, 3]])
pytest.raises(IndexError, lambda: d.vindex[[True] * 5])
pytest.raises(IndexError, lambda: d.vindex[[0], [5]])
pytest.raises(IndexError, lambda: d.vindex[[0], [-6]])
def test_vindex_merge():
from dask.array.core import _vindex_merge
locations = [1], [2, 0]
values = [np.array([[1, 2, 3]]),
np.array([[10, 20, 30], [40, 50, 60]])]
assert (_vindex_merge(locations, values) == np.array([[40, 50, 60],
[1, 2, 3],
[10, 20, 30]])).all()
def test_vindex_identity():
rng = da.random.RandomState(42)
a, b = 10, 20
x = rng.random(a, chunks=a // 2)
assert x is x.vindex[:]
assert x is x.vindex[:a]
pytest.raises(IndexError, lambda: x.vindex[:a - 1])
pytest.raises(IndexError, lambda: x.vindex[1:])
pytest.raises(IndexError, lambda: x.vindex[0:a:2])
x = rng.random((a, b), chunks=(a // 2, b // 2))
assert x is x.vindex[:, :]
assert x is x.vindex[:a, :b]
pytest.raises(IndexError, lambda: x.vindex[:, :b - 1])
pytest.raises(IndexError, lambda: x.vindex[:, 1:])
pytest.raises(IndexError, lambda: x.vindex[:, 0:b:2])
def test_empty_array():
assert_eq(np.arange(0), da.arange(0, chunks=5))
def test_memmap():
with tmpfile('npy') as fn_1:
with tmpfile('npy') as fn_2:
try:
x = da.arange(100, chunks=15)
target = np.memmap(fn_1, shape=x.shape, mode='w+', dtype=x.dtype)
x.store(target)
assert_eq(target, x)
np.save(fn_2, target)
assert_eq(np.load(fn_2, mmap_mode='r'), x)
finally:
target._mmap.close()
def test_to_npy_stack():
x = np.arange(5 * 10 * 10).reshape((5, 10, 10))
d = da.from_array(x, chunks=(2, 4, 4))
with tmpdir() as dirname:
stackdir = os.path.join(dirname, 'test')
da.to_npy_stack(stackdir, d, axis=0)
assert os.path.exists(os.path.join(stackdir, '0.npy'))
assert (np.load(os.path.join(stackdir, '1.npy')) == x[2:4]).all()
e = da.from_npy_stack(stackdir)
assert_eq(d, e)
def test_view():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(2, 3))
assert_eq(x.view('i4'), d.view('i4'))
assert_eq(x.view('i2'), d.view('i2'))
assert all(isinstance(s, int) for s in d.shape)
x = np.arange(8, dtype='i1')
d = da.from_array(x, chunks=(4,))
assert_eq(x.view('i4'), d.view('i4'))
with pytest.raises(ValueError):
x = np.arange(8, dtype='i1')
d = da.from_array(x, chunks=(3,))
d.view('i4')
with pytest.raises(ValueError):
d.view('i4', order='asdf')
def test_view_fortran():
x = np.asfortranarray(np.arange(64).reshape((8, 8)))
d = da.from_array(x, chunks=(2, 3))
assert_eq(x.T.view('i4').T, d.view('i4', order='F'))
assert_eq(x.T.view('i2').T, d.view('i2', order='F'))
def test_h5py_tokenize():
h5py = pytest.importorskip('h5py')
with tmpfile('hdf5') as fn1:
with tmpfile('hdf5') as fn2:
f = h5py.File(fn1)
g = h5py.File(fn2)
f['x'] = np.arange(10).astype(float)
g['x'] = np.ones(10).astype(float)
x1 = f['x']
x2 = g['x']
assert tokenize(x1) != tokenize(x2)
def test_map_blocks_with_changed_dimension():
x = np.arange(56).reshape((7, 8))
d = da.from_array(x, chunks=(7, 4))
e = d.map_blocks(lambda b: b.sum(axis=0), chunks=(4,), drop_axis=0,
dtype=d.dtype)
assert e.chunks == ((4, 4),)
assert_eq(e, x.sum(axis=0))
# Provided chunks have wrong shape
with pytest.raises(ValueError):
d.map_blocks(lambda b: b.sum(axis=0), chunks=(7, 4), drop_axis=0)
with pytest.raises(ValueError):
d.map_blocks(lambda b: b.sum(axis=0), chunks=((4, 4, 4),), drop_axis=0)
# Can't drop axis with more than 1 block
with pytest.raises(ValueError):
d.map_blocks(lambda b: b.sum(axis=1), drop_axis=1, dtype=d.dtype)
# Adding axis with a gap
with pytest.raises(ValueError):
d.map_blocks(lambda b: b, new_axis=(3, 4))
d = da.from_array(x, chunks=(4, 8))
e = d.map_blocks(lambda b: b.sum(axis=1), drop_axis=1, dtype=d.dtype)
assert e.chunks == ((4, 3),)
assert_eq(e, x.sum(axis=1))
x = np.arange(64).reshape((8, 8))
d = da.from_array(x, chunks=(4, 4))
e = d.map_blocks(lambda b: b[None, :, :, None],
chunks=(1, 4, 4, 1), new_axis=[0, 3], dtype=d.dtype)
assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
assert_eq(e, x[None, :, :, None])
e = d.map_blocks(lambda b: b[None, :, :, None],
new_axis=[0, 3], dtype=d.dtype)
assert e.chunks == ((1,), (4, 4), (4, 4), (1,))
assert_eq(e, x[None, :, :, None])
# Both new_axis and drop_axis
d = da.from_array(x, chunks=(8, 4))
e = d.map_blocks(lambda b: b.sum(axis=0)[:, None, None],
drop_axis=0, new_axis=(1, 2), dtype=d.dtype)
assert e.chunks == ((4, 4), (1,), (1,))
assert_eq(e, x.sum(axis=0)[:, None, None])
d = da.from_array(x, chunks=(4, 8))
e = d.map_blocks(lambda b: b.sum(axis=1)[:, None, None],
drop_axis=1, new_axis=(1, 2), dtype=d.dtype)
assert e.chunks == ((4, 4), (1,), (1,))
assert_eq(e, x.sum(axis=1)[:, None, None])
def test_broadcast_chunks():
assert broadcast_chunks() == ()
assert broadcast_chunks(((2, 3),)) == ((2, 3),)
assert broadcast_chunks(((5, 5),), ((5, 5),)) == ((5, 5),)
a = ((10, 10, 10), (5, 5),)
b = ((5, 5),)
assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5),)
assert broadcast_chunks(b, a) == ((10, 10, 10), (5, 5),)
a = ((10, 10, 10), (5, 5),)
b = ((1,), (5, 5),)
assert broadcast_chunks(a, b) == ((10, 10, 10), (5, 5),)
a = ((10, 10, 10), (5, 5),)
b = ((3, 3,), (5, 5),)
with pytest.raises(ValueError):
broadcast_chunks(a, b)
a = ((1,), (5, 5),)
b = ((1,), (5, 5),)
assert broadcast_chunks(a, b) == a
a = ((1,), (np.nan, np.nan, np.nan),)
b = ((3, 3), (1,),)
r = broadcast_chunks(a, b)
assert r[0] == b[0] and np.allclose(r[1], a[1], equal_nan=True)
a = ((3, 3), (1,),)
b = ((1,), (np.nan, np.nan, np.nan),)
r = broadcast_chunks(a, b)
assert r[0] == a[0] and np.allclose(r[1], b[1], equal_nan=True)
a = ((3, 3,), (5, 5),)
b = ((1,), (np.nan, np.nan, np.nan),)
with pytest.raises(ValueError):
broadcast_chunks(a, b)
def test_chunks_error():
x = np.ones((10, 10))
with pytest.raises(ValueError):
da.from_array(x, chunks=(5,))
def test_array_compute_forward_kwargs():
x = da.arange(10, chunks=2).sum()
x.compute(bogus_keyword=10)
def test_dont_fuse_outputs():
dsk = {('x', 0): np.array([1, 2]),
('x', 1): (inc, ('x', 0))}
a = da.Array(dsk, 'x', chunks=(2,), shape=(4,), dtype=np.array([1]).dtype)
assert_eq(a, np.array([1, 2, 2, 3], dtype=a.dtype))
def test_dont_dealias_outputs():
dsk = {('x', 0, 0): np.ones((2, 2)),
('x', 0, 1): np.ones((2, 2)),
('x', 1, 0): np.ones((2, 2)),
('x', 1, 1): ('x', 0, 0)}
a = da.Array(dsk, 'x', chunks=(2, 2), shape=(4, 4), dtype=np.ones(1).dtype)
assert_eq(a, np.ones((4, 4)))
def test_timedelta_op():
x = np.array([np.timedelta64(10, 'h')])
y = np.timedelta64(1, 'h')
a = da.from_array(x, chunks=(1,)) / y
assert a.compute() == x / y
def test_to_delayed():
x = da.random.random((4, 4), chunks=(2, 2))
y = x + 10
[[a, b], [c, d]] = y.to_delayed()
assert_eq(a.compute(), y[:2, :2])
s = 2
x = da.from_array(np.array(s), chunks=0)
a = x.to_delayed()[tuple()]
assert a.compute() == s
def test_to_delayed_optimize_graph():
x = da.ones((4, 4), chunks=(2, 2))
y = x[1:][1:][1:][:, 1:][:, 1:][:, 1:]
# optimizations
d = y.to_delayed().flatten().tolist()[0]
assert len([k for k in d.dask if k[0].startswith('getitem')]) == 1
# no optimizations
d2 = y.to_delayed(optimize_graph=False).flatten().tolist()[0]
assert dict(d2.dask) == dict(y.dask)
assert (d.compute() == d2.compute()).all()
def test_cumulative():
x = da.arange(20, chunks=5)
assert_eq(x.cumsum(axis=0), np.arange(20).cumsum())
assert_eq(x.cumprod(axis=0), np.arange(20).cumprod())
assert_eq(da.nancumsum(x, axis=0), nancumsum(np.arange(20)))
assert_eq(da.nancumprod(x, axis=0), nancumprod(np.arange(20)))
a = np.random.random((20))
rs = np.random.RandomState(0)
a[rs.rand(*a.shape) < 0.5] = np.nan
x = da.from_array(a, chunks=5)
assert_eq(da.nancumsum(x, axis=0), nancumsum(a))
assert_eq(da.nancumprod(x, axis=0), nancumprod(a))
a = np.random.random((20, 24))
x = da.from_array(a, chunks=(6, 5))
assert_eq(x.cumsum(axis=0), a.cumsum(axis=0))
assert_eq(x.cumsum(axis=1), a.cumsum(axis=1))
assert_eq(x.cumprod(axis=0), a.cumprod(axis=0))
assert_eq(x.cumprod(axis=1), a.cumprod(axis=1))
assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))
assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))
assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))
assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))
a = np.random.random((20, 24))
rs = np.random.RandomState(0)
a[rs.rand(*a.shape) < 0.5] = np.nan
x = da.from_array(a, chunks=(6, 5))
assert_eq(da.nancumsum(x, axis=0), nancumsum(a, axis=0))
assert_eq(da.nancumsum(x, axis=1), nancumsum(a, axis=1))
assert_eq(da.nancumprod(x, axis=0), nancumprod(a, axis=0))
assert_eq(da.nancumprod(x, axis=1), nancumprod(a, axis=1))
a = np.random.random((20, 24, 13))
x = da.from_array(a, chunks=(6, 5, 4))
for axis in [0, 1, 2, -1, -2, -3]:
assert_eq(x.cumsum(axis=axis), a.cumsum(axis=axis))
assert_eq(x.cumprod(axis=axis), a.cumprod(axis=axis))
assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))
assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))
a = np.random.random((20, 24, 13))
rs = np.random.RandomState(0)
a[rs.rand(*a.shape) < 0.5] = np.nan
x = da.from_array(a, chunks=(6, 5, 4))
for axis in [0, 1, 2, -1, -2, -3]:
assert_eq(da.nancumsum(x, axis=axis), nancumsum(a, axis=axis))
assert_eq(da.nancumprod(x, axis=axis), nancumprod(a, axis=axis))
with pytest.raises(ValueError):
x.cumsum(axis=3)
with pytest.raises(ValueError):
x.cumsum(axis=-4)
def test_atop_names():
x = da.ones(5, chunks=(2,))
y = atop(add, 'i', x, 'i', dtype=x.dtype)
assert y.name.startswith('add')
def test_atop_new_axes():
def f(x):
return x[:, None] * np.ones((1, 7))
x = da.ones(5, chunks=2)
y = atop(f, 'aq', x, 'a', new_axes={'q': 7}, concatenate=True,
dtype=x.dtype)
assert y.chunks == ((2, 2, 1), (7,))
assert_eq(y, np.ones((5, 7)))
def f(x):
return x[None, :] * np.ones((7, 1))
x = da.ones(5, chunks=2)
y = atop(f, 'qa', x, 'a', new_axes={'q': 7}, concatenate=True,
dtype=x.dtype)
assert y.chunks == ((7,), (2, 2, 1))
assert_eq(y, np.ones((7, 5)))
def f(x):
y = x.sum(axis=1)
return y[:, None] * np.ones((1, 5))
x = da.ones((4, 6), chunks=(2, 2))
y = atop(f, 'aq', x, 'ab', new_axes={'q': 5}, concatenate=True,
dtype=x.dtype)
assert y.chunks == ((2, 2), (5,))
assert_eq(y, np.ones((4, 5)) * 6)
def test_atop_kwargs():
def f(a, b=0):
return a + b
x = da.ones(5, chunks=(2,))
y = atop(f, 'i', x, 'i', b=10, dtype=x.dtype)
assert_eq(y, np.ones(5) + 10)
def test_atop_chunks():
x = da.ones((5, 5), chunks=((2, 1, 2), (3, 2)))
def double(a, axis=0):
return np.concatenate([a, a], axis=axis)
y = atop(double, 'ij', x, 'ij',
adjust_chunks={'i': lambda n: 2 * n}, axis=0, dtype=x.dtype)
assert y.chunks == ((4, 2, 4), (3, 2))
assert_eq(y, np.ones((10, 5)))
y = atop(double, 'ij', x, 'ij',
adjust_chunks={'j': lambda n: 2 * n}, axis=1, dtype=x.dtype)
assert y.chunks == ((2, 1, 2), (6, 4))
assert_eq(y, np.ones((5, 10)))
x = da.ones((10, 10), chunks=(5, 5))
y = atop(double, 'ij', x, 'ij', axis=0,
adjust_chunks={'i': 10}, dtype=x.dtype)
assert y.chunks == ((10, 10), (5, 5))
assert_eq(y, np.ones((20, 10)))
y = atop(double, 'ij', x, 'ij', axis=0,
adjust_chunks={'i': (10, 10)}, dtype=x.dtype)
assert y.chunks == ((10, 10), (5, 5))
assert_eq(y, np.ones((20, 10)))
def test_atop_raises_on_incorrect_indices():
x = da.arange(5, chunks=3)
with pytest.raises(ValueError) as info:
da.atop(lambda x: x, 'ii', x, 'ii', dtype=int)
assert 'ii' in str(info.value)
assert '1' in str(info.value)
def test_from_delayed():
v = delayed(np.ones)((5, 3))
x = from_delayed(v, shape=(5, 3), dtype=np.ones(0).dtype)
assert isinstance(x, Array)
assert_eq(x, np.ones((5, 3)))
def test_A_property():
x = da.ones(5, chunks=(2,))
assert x.A is x
def test_copy_mutate():
x = da.arange(5, chunks=(2,))
y = x.copy()
memo = {}
y2 = copy.deepcopy(x, memo=memo)
x[x % 2 == 0] = -1
xx = np.arange(5)
xx[xx % 2 == 0] = -1
assert_eq(x, xx)
assert_eq(y, np.arange(5))
assert_eq(y2, np.arange(5))
assert memo[id(x)] is y2
def test_npartitions():
assert da.ones(5, chunks=(2,)).npartitions == 3
assert da.ones((5, 5), chunks=(2, 3)).npartitions == 6
def test_astype_gh1151():
a = np.arange(5).astype(np.int32)
b = da.from_array(a, (1,))
assert_eq(a.astype(np.int16), b.astype(np.int16))
def test_elemwise_name():
assert (da.ones(5, chunks=2) + 1).name.startswith('add-')
def test_map_blocks_name():
assert da.ones(5, chunks=2).map_blocks(inc).name.startswith('inc-')
def test_from_array_names():
pytest.importorskip('distributed')
x = np.ones(10)
d = da.from_array(x, chunks=2)
names = countby(key_split, d.dask)
assert set(names.values()) == set([1, 5])
def test_array_picklable():
from pickle import loads, dumps
a = da.arange(100, chunks=25)
a2 = loads(dumps(a))
assert_eq(a, a2)
def test_from_array_raises_on_bad_chunks():
x = np.ones(10)
with pytest.raises(ValueError):
da.from_array(x, chunks=(5, 5, 5))
# with pytest.raises(ValueError):
# da.from_array(x, chunks=100)
with pytest.raises(ValueError):
da.from_array(x, chunks=((5, 5, 5),))
def test_concatenate_axes():
x = np.ones((2, 2, 2))
assert_eq(concatenate_axes([x, x], axes=[0]),
np.ones((4, 2, 2)))
assert_eq(concatenate_axes([x, x, x], axes=[0]),
np.ones((6, 2, 2)))
assert_eq(concatenate_axes([x, x], axes=[1]),
np.ones((2, 4, 2)))
assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 1]),
np.ones((4, 4, 2)))
assert_eq(concatenate_axes([[x, x], [x, x]], axes=[0, 2]),
np.ones((4, 2, 4)))
assert_eq(concatenate_axes([[x, x, x], [x, x, x]], axes=[1, 2]),
np.ones((2, 4, 6)))
with pytest.raises(ValueError):
concatenate_axes([[x, x], [x, x]], axes=[0]) # not all nested lists accounted for
with pytest.raises(ValueError):
concatenate_axes([x, x], axes=[0, 1, 2, 3]) # too many axes
def test_atop_concatenate():
x = da.ones((4, 4, 4), chunks=(2, 2, 2))
y = da.ones((4, 4), chunks=(2, 2))
def f(a, b):
assert isinstance(a, np.ndarray)
assert isinstance(b, np.ndarray)
assert a.shape == (2, 4, 4)
assert b.shape == (4, 4)
return (a + b).sum(axis=(1, 2))
z = atop(f, 'i', x, 'ijk', y, 'jk', concatenate=True, dtype=x.dtype)
assert_eq(z, np.ones(4) * 32)
z = atop(add, 'ij', y, 'ij', y, 'ij', concatenate=True, dtype=x.dtype)
assert_eq(z, np.ones((4, 4)) * 2)
def f(a, b, c):
assert isinstance(a, np.ndarray)
assert isinstance(b, np.ndarray)
assert isinstance(c, np.ndarray)
assert a.shape == (4, 2, 4)
assert b.shape == (4, 4)
assert c.shape == (4, 2)
return np.ones(5)
z = atop(f, 'j', x, 'ijk', y, 'ki', y, 'ij', concatenate=True,
dtype=x.dtype)
assert_eq(z, np.ones(10), check_shape=False)
def test_common_blockdim():
assert common_blockdim([(5,), (5,)]) == (5,)
assert common_blockdim([(5,), (2, 3,)]) == (2, 3)
assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)
assert common_blockdim([(5, 5), (2, 3, 5)]) == (2, 3, 5)
assert common_blockdim([(5, 2, 3), (2, 3, 5)]) == (2, 3, 2, 3)
assert common_blockdim([(1, 2), (2, 1)]) == (1, 1, 1)
assert common_blockdim([(1, 2, 2), (2, 1, 2), (2, 2, 1)]) == (1, 1, 1, 1, 1)
def test_uneven_chunks_that_fit_neatly():
x = da.arange(10, chunks=((5, 5),))
y = da.ones(10, chunks=((5, 2, 3),))
assert_eq(x + y, np.arange(10) + np.ones(10))
z = x + y
assert z.chunks == ((5, 2, 3),)
def test_elemwise_uneven_chunks():
x = da.arange(10, chunks=((4, 6),))
y = da.ones(10, chunks=((6, 4),))
assert_eq(x + y, np.arange(10) + np.ones(10))
z = x + y
assert z.chunks == ((4, 2, 4),)
x = da.random.random((10, 10), chunks=((4, 6), (5, 2, 3)))
y = da.random.random((4, 10, 10), chunks=((2, 2), (6, 4), (2, 3, 5)))
z = x + y
assert_eq(x + y, x.compute() + y.compute())
assert z.chunks == ((2, 2), (4, 2, 4), (2, 3, 2, 3))
def test_uneven_chunks_atop():
x = da.random.random((10, 10), chunks=((2, 3, 2, 3), (5, 5)))
y = da.random.random((10, 10), chunks=((4, 4, 2), (4, 2, 4)))
z = atop(np.dot, 'ik', x, 'ij', y, 'jk', dtype=x.dtype, concatenate=True)
assert z.chunks == (x.chunks[0], y.chunks[1])
assert_eq(z, x.compute().dot(y))
def test_warn_bad_rechunking():
x = da.ones((20, 20), chunks=(20, 1))
y = da.ones((20, 20), chunks=(1, 20))
with warnings.catch_warnings(record=True) as record:
x + y
assert record
assert '20' in record[0].message.args[0]
def test_optimize_fuse_keys():
x = da.ones(10, chunks=(5,))
y = x + 1
z = y + 1
dsk = z.__dask_optimize__(z.dask, z.__dask_keys__())
assert not set(y.dask) & set(dsk)
dsk = z.__dask_optimize__(z.dask, z.__dask_keys__(),
fuse_keys=y.__dask_keys__())
assert all(k in dsk for k in y.__dask_keys__())
def test_concatenate_stack_dont_warn():
with warnings.catch_warnings(record=True) as record:
da.concatenate([da.ones(2, chunks=1)] * 62)
assert not record
with warnings.catch_warnings(record=True) as record:
da.stack([da.ones(2, chunks=1)] * 62)
assert not record
def test_map_blocks_delayed():
x = da.ones((10, 10), chunks=(5, 5))
y = np.ones((5, 5))
z = x.map_blocks(add, y, dtype=x.dtype)
yy = delayed(y)
zz = x.map_blocks(add, yy, dtype=x.dtype)
assert_eq(z, zz)
assert yy.key in zz.dask
def test_no_chunks():
X = np.arange(11)
dsk = {('x', 0): np.arange(5), ('x', 1): np.arange(5, 11)}
x = Array(dsk, 'x', ((np.nan, np.nan,),), np.arange(1).dtype)
assert_eq(x + 1, X + 1)
assert_eq(x.sum(), X.sum())
assert_eq((x + 1).std(), (X + 1).std())
assert_eq((x + x).std(), (X + X).std())
assert_eq((x + x).std(keepdims=True), (X + X).std(keepdims=True))
def test_no_chunks_2d():
X = np.arange(24).reshape((4, 6))
x = da.from_array(X, chunks=(2, 2))
x._chunks = ((np.nan, np.nan), (np.nan, np.nan, np.nan))
with pytest.warns(None): # zero division warning
assert_eq(da.log(x), np.log(X))
assert_eq(x.T, X.T)
assert_eq(x.sum(axis=0, keepdims=True), X.sum(axis=0, keepdims=True))
assert_eq(x.sum(axis=1, keepdims=True), X.sum(axis=1, keepdims=True))
assert_eq(x.dot(x.T + 1), X.dot(X.T + 1))
def test_no_chunks_yes_chunks():
X = np.arange(24).reshape((4, 6))
x = da.from_array(X, chunks=(2, 2))
x._chunks = ((2, 2), (np.nan, np.nan, np.nan))
assert (x + 1).chunks == ((2, 2), (np.nan, np.nan, np.nan))
assert (x.T).chunks == ((np.nan, np.nan, np.nan), (2, 2))
assert (x.dot(x.T)).chunks == ((2, 2), (2, 2))
def test_raise_informative_errors_no_chunks():
X = np.arange(10)
a = da.from_array(X, chunks=(5, 5))
a._chunks = ((np.nan, np.nan),)
b = da.from_array(X, chunks=(4, 4, 2))
b._chunks = ((np.nan, np.nan, np.nan),)
for op in [lambda: a + b,
lambda: a[1],
lambda: a[::2],
lambda: a[-5],
lambda: a.rechunk(3),
lambda: a.reshape(2, 5)]:
with pytest.raises(ValueError) as e:
op()
if 'chunk' not in str(e) or 'unknown' not in str(e):
op()
def test_no_chunks_slicing_2d():
X = np.arange(24).reshape((4, 6))
x = da.from_array(X, chunks=(2, 2))
x._chunks = ((2, 2), (np.nan, np.nan, np.nan))
assert_eq(x[0], X[0])
for op in [lambda: x[:, 4],
lambda: x[:, ::2],
lambda: x[0, 2:4]]:
with pytest.raises(ValueError) as e:
op()
assert 'chunk' in str(e) and 'unknown' in str(e)
def test_index_array_with_array_1d():
x = np.arange(10)
dx = da.from_array(x, chunks=(5,))
dx._chunks = ((np.nan, np.nan),)
assert_eq(x[x > 6], dx[dx > 6])
assert_eq(x[x % 2 == 0], dx[dx % 2 == 0])
dy = da.ones(11, chunks=(3,))
with pytest.raises(ValueError):
dx[dy > 5]
def test_index_array_with_array_2d():
x = np.arange(24).reshape((4, 6))
dx = da.from_array(x, chunks=(2, 2))
dx._chunks = ((2, 2), (np.nan, np.nan, np.nan))
assert (sorted(x[x % 2 == 0].tolist()) ==
sorted(dx[dx % 2 == 0].compute().tolist()))
assert (sorted(x[x > 6].tolist()) ==
sorted(dx[dx > 6].compute().tolist()))
@pytest.mark.xfail(reason='Chunking does not align well')
def test_index_array_with_array_3d_2d():
x = np.arange(4**3).reshape((4, 4, 4))
dx = da.from_array(x, chunks=(2, 2, 2))
ind = np.random.random((4, 4)) > 0.5
ind = np.arange(4 ** 2).reshape((4, 4)) % 2 == 0
dind = da.from_array(ind, (2, 2))
assert_eq(x[ind], dx[dind])
assert_eq(x[:, ind], dx[:, dind])
def test_setitem_1d():
x = np.arange(10)
dx = da.from_array(x.copy(), chunks=(5,))
x[x > 6] = -1
x[x % 2 == 0] = -2
dx[dx > 6] = -1
dx[dx % 2 == 0] = -2
assert_eq(x, dx)
def test_setitem_2d():
x = np.arange(24).reshape((4, 6))
dx = da.from_array(x.copy(), chunks=(2, 2))
x[x > 6] = -1
x[x % 2 == 0] = -2
dx[dx > 6] = -1
dx[dx % 2 == 0] = -2
assert_eq(x, dx)
@pytest.mark.skipif(np.__version__ >= '1.13.0',
reason='boolean slicing rules changed')
def test_setitem_mixed_d():
x = np.arange(24).reshape((4, 6))
dx = da.from_array(x, chunks=(2, 2))
x[x[0, None] > 2] = -1
dx[dx[0, None] > 2] = -1
assert_eq(x, dx)
x[x[None, 0] > 2] = -1
dx[dx[None, 0] > 2] = -1
assert_eq(x, dx)
def test_setitem_errs():
x = da.ones((4, 4), chunks=(2, 2))
with pytest.raises(ValueError):
x[x > 1] = x
def test_zero_slice_dtypes():
x = da.arange(5, chunks=1)
y = x[[]]
assert y.dtype == x.dtype
assert y.shape == (0,)
assert_eq(x[[]], np.arange(5)[[]])
def test_zero_sized_array_rechunk():
x = da.arange(5, chunks=1)[:0]
y = da.atop(identity, 'i', x, 'i', dtype=x.dtype)
assert_eq(x, y)
def test_atop_zero_shape():
da.atop(lambda x: x, 'i',
da.arange(10, chunks=10), 'i',
da.from_array(np.ones((0, 2)), ((0,), 2)), 'ab',
da.from_array(np.ones((0,)), ((0,),)), 'a',
dtype='float64')
def test_atop_zero_shape_new_axes():
da.atop(lambda x: np.ones(42), 'i',
da.from_array(np.ones((0, 2)), ((0,), 2)), 'ab',
da.from_array(np.ones((0,)), ((0,),)), 'a',
dtype='float64', new_axes={'i': 42})
def test_broadcast_against_zero_shape():
assert_eq(da.arange(1, chunks=1)[:0] + 0,
np.arange(1)[:0] + 0)
assert_eq(da.arange(1, chunks=1)[:0] + 0.1,
np.arange(1)[:0] + 0.1)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0,
np.ones((5, 5))[:0] + 0)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:0] + 0.1,
np.ones((5, 5))[:0] + 0.1)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0,
np.ones((5, 5))[:, :0] + 0)
assert_eq(da.ones((5, 5), chunks=(2, 3))[:, :0] + 0.1,
np.ones((5, 5))[:, :0] + 0.1)
def test_from_array_name():
x = np.array([1, 2, 3, 4, 5])
chunks = x.shape
# Default is tokenize the array
dx = da.from_array(x, chunks=chunks)
hashed_name = dx.name
assert da.from_array(x, chunks=chunks).name == hashed_name
# Specify name directly
assert da.from_array(x, chunks=chunks, name='x').name == 'x'
# False gives a random name
dx2 = da.from_array(x, chunks=chunks, name=False)
dx3 = da.from_array(x, chunks=chunks, name=False)
assert dx2.name != hashed_name
assert dx3.name != hashed_name
assert dx2.name != dx3.name
def test_concatenate_errs():
with pytest.raises(ValueError) as e:
da.concatenate([da.zeros((2, 1), chunks=(2, 1)),
da.zeros((2, 3), chunks=(2, 3))])
assert 'shape' in str(e).lower()
assert '(2, 1)' in str(e)
with pytest.raises(ValueError):
da.concatenate([da.zeros((1, 2), chunks=(1, 2)),
da.zeros((3, 2), chunks=(3, 2))], axis=1)
def test_stack_errs():
with pytest.raises(ValueError) as e:
da.stack([da.zeros((2,), chunks=(2))] * 10 +
[da.zeros((3,), chunks=(3))] * 10)
assert 'shape' in str(e.value).lower()
assert '(2,)' in str(e.value)
assert len(str(e.value)) < 105
def test_atop_with_numpy_arrays():
x = np.ones(10)
y = da.ones(10, chunks=(5,))
assert_eq(x + y, x + x)
s = da.sum(x)
assert any(x is v for v in s.dask.values())
@pytest.mark.parametrize('chunks', (100, 6))
@pytest.mark.parametrize('other', [[0, 0, 1], [2, 1, 3], (0, 0, 1)])
def test_elemwise_with_lists(chunks, other):
x = np.arange(12).reshape((4, 3))
d = da.arange(12, chunks=chunks).reshape((4, 3))
x2 = np.vstack([x[:, 0], x[:, 1], x[:, 2]]).T
d2 = da.vstack([d[:, 0], d[:, 1], d[:, 2]]).T
assert_eq(x2, d2)
x3 = x2 * other
d3 = d2 * other
assert_eq(x3, d3)
def test_constructor_plugin():
L = []
L2 = []
with dask.config.set(array_plugins=[L.append, L2.append]):
x = da.ones(10, chunks=5)
y = x + 1
assert L == L2 == [x, y]
with dask.config.set(array_plugins=[lambda x: x.compute()]):
x = da.ones(10, chunks=5)
y = x + 1
assert isinstance(y, np.ndarray)
assert len(L) == 2
def test_no_warnings_on_metadata():
x = da.ones(5, chunks=3)
with warnings.catch_warnings(record=True) as record:
da.arccos(x)
assert not record
def test_delayed_array_key_hygiene():
a = da.zeros((1,), chunks=(1,))
d = delayed(identity)(a)
b = da.from_delayed(d, shape=a.shape, dtype=a.dtype)
assert_eq(a, b)
def test_empty_chunks_in_array_len():
x = da.ones((), chunks=())
with pytest.raises(TypeError) as exc_info:
len(x)
err_msg = 'len() of unsized object'
assert err_msg in str(exc_info.value)
@pytest.mark.parametrize('dtype', [None, [('a', 'f4'), ('b', object)]])
def test_meta(dtype):
a = da.zeros((1,), chunks=(1,))
assert a._meta.dtype == a.dtype
assert isinstance(a._meta, np.ndarray)
assert a.nbytes < 1000
@pytest.mark.parametrize('shape,limit,expected', [
(100, 10, (10,) * 10),
(20, 10, (10, 10)),
(20, 5, (5, 5, 5, 5)),
(24, 5, (4, 4, 4, 4, 4, 4)), # common factor is close, use it
(23, 5, (5, 5, 5, 5, 3)), # relatively prime, don't use 1s
(1000, 167, (125,) * 8), # find close value
])
def test_normalize_chunks_auto_1d(shape, limit, expected):
result = normalize_chunks('auto', (shape,), limit=limit * 8, dtype=np.float64)
assert result == (expected,)
@pytest.mark.parametrize('shape,chunks,limit,expected', [
((20, 20), ('auto', 2), 20, ((10, 10), (2,) * 10)),
((20, 20), ('auto', (2, 2, 2, 2, 2, 5, 5)), 20, ((4, 4, 4, 4, 4), (2, 2, 2, 2, 2, 5, 5))),
((1, 20), 'auto', 10, ((1,), (10, 10))),
])
def test_normalize_chunks_auto_2d(shape, chunks, limit, expected):
result = normalize_chunks(chunks, shape, limit=limit, dtype='uint8')
assert result == expected
def test_normalize_chunks_auto_3d():
result = normalize_chunks(('auto', 'auto', 2), (20, 20, 20), limit=200, dtype='uint8')
expected = ((10, 10), (10, 10), (2,) * 10)
assert result == expected
result = normalize_chunks('auto', (20, 20, 20), limit=8, dtype='uint8')
expected = ((2,) * 10,) * 3
assert result == expected
def test_constructors_chunks_dict():
x = da.ones((20, 20), chunks={0: 10, 1: 5})
assert x.chunks == ((10, 10), (5, 5, 5, 5))
x = da.ones((20, 20), chunks={0: 10, 1: "auto"})
assert x.chunks == ((10, 10), (20,))
def test_from_array_chunks_dict():
with dask.config.set({'array.chunk-size': '128kiB'}):
x = np.empty((100, 100, 100))
y = da.from_array(x, chunks={0: 10, 1: -1, 2: 'auto'})
z = da.from_array(x, chunks=(10, 100, 10))
assert y.chunks == z.chunks
@pytest.mark.parametrize('dtype', [object, [('a', object), ('b', int)]])
def test_normalize_chunks_object_dtype(dtype):
x = np.array(['a', 'abc'], dtype=object)
with pytest.raises(NotImplementedError):
da.from_array(x, chunks='auto')
def test_normalize_chunks_tuples_of_tuples():
result = normalize_chunks(((2, 3, 5), 'auto'), (10, 10), limit=10, dtype=np.uint8)
expected = ((2, 3, 5), (2, 2, 2, 2, 2))
assert result == expected
def test_normalize_chunks_nan():
with pytest.raises(ValueError) as info:
normalize_chunks('auto', (np.nan,), limit=10, dtype=np.uint8)
assert "auto" in str(info.value)
with pytest.raises(ValueError) as info:
normalize_chunks(((np.nan, np.nan), 'auto'), (10, 10), limit=10, dtype=np.uint8)
assert "auto" in str(info.value)
def test_zarr_roundtrip():
pytest.importorskip('zarr')
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(d)
a2 = da.from_zarr(d)
assert_eq(a, a2)
assert a2.chunks == a.chunks
@pytest.mark.parametrize('compute', [False, True])
def test_zarr_return_stored(compute):
pytest.importorskip('zarr')
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a2 = a.to_zarr(d, compute=compute, return_stored=True)
assert isinstance(a2, Array)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_zarr_existing_array():
zarr = pytest.importorskip('zarr')
c = (1, 1)
a = da.ones((3, 3), chunks=c)
z = zarr.zeros_like(a, chunks=c)
a.to_zarr(z)
a2 = da.from_zarr(z)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_read_zarr_chunks():
pytest.importorskip('zarr')
a = da.zeros((9, ), chunks=(3, ))
with tmpdir() as d:
a.to_zarr(d)
arr = da.from_zarr(d, chunks=(5, ))
assert arr.chunks == ((5, 4), )
def test_zarr_pass_mapper():
pytest.importorskip('zarr')
import zarr.storage
with tmpdir() as d:
mapper = zarr.storage.DirectoryStore(d)
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(mapper)
a2 = da.from_zarr(mapper)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_zarr_group():
zarr = pytest.importorskip('zarr')
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(d, component='test')
with pytest.raises((OSError, ValueError)):
a.to_zarr(d, component='test', overwrite=False)
a.to_zarr(d, component='test', overwrite=True)
# second time is fine, group exists
a.to_zarr(d, component='test2', overwrite=False)
a.to_zarr(d, component='nested/test', overwrite=False)
group = zarr.open_group(d, mode='r')
assert list(group) == ['nested', 'test', 'test2']
assert 'test' in group['nested']
a2 = da.from_zarr(d, component='test')
assert_eq(a, a2)
assert a2.chunks == a.chunks
@pytest.mark.parametrize('data', [[( ), True],
[((1, ),), True],
[((1, 1, 1),), True],
[((1, ), (1, )), True],
[((2, 2, 1), ), True],
[((2, 2, 3), ), False],
[((1, 1, 1), (2, 2, 3)), False],
[((1, 2, 1), ), False]
])
def test_regular_chunks(data):
chunkset, expected = data
assert da.core._check_regular_chunks(chunkset) == expected
def test_zarr_nocompute():
pytest.importorskip('zarr')
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
out = a.to_zarr(d, compute=False)
assert isinstance(out, Delayed)
dask.compute(out)
a2 = da.from_zarr(d)
assert_eq(a, a2)
assert a2.chunks == a.chunks
def test_blocks_indexer():
x = da.arange(10, chunks=2)
assert isinstance(x.blocks[0], da.Array)
assert_eq(x.blocks[0], x[:2])
assert_eq(x.blocks[-1], x[-2:])
assert_eq(x.blocks[:3], x[:6])
assert_eq(x.blocks[[0, 1, 2]], x[:6])
assert_eq(x.blocks[[3, 0, 2]], np.array([6, 7, 0, 1, 4, 5]))
x = da.random.random((20, 20), chunks=(4, 5))
assert_eq(x.blocks[0], x[:4])
assert_eq(x.blocks[0, :3], x[:4, :15])
assert_eq(x.blocks[:, :3], x[:, :15])
x = da.ones((40, 40, 40), chunks=(10, 10, 10))
assert_eq(x.blocks[0, :, 0], np.ones((10, 40, 10)))
x = da.ones((2, 2), chunks=1)
with pytest.raises(ValueError):
x.blocks[[0, 1], [0, 1]]
with pytest.raises(ValueError):
x.blocks[np.array([0, 1]), [0, 1]]
with pytest.raises(ValueError) as info:
x.blocks[np.array([0, 1]), np.array([0, 1])]
assert "list" in str(info.value)
with pytest.raises(ValueError) as info:
x.blocks[None, :, :]
assert "newaxis" in str(info.value) and "not supported" in str(info.value)
with pytest.raises(IndexError) as info:
x.blocks[100, 100]
def test_dask_array_holds_scipy_sparse_containers():
sparse = pytest.importorskip('scipy.sparse')
x = da.random.random((1000, 10), chunks=(100, 10))
x[x < 0.9] = 0
y = x.map_blocks(sparse.csr_matrix)
vs = y.to_delayed().flatten().tolist()
values = dask.compute(*vs, scheduler='single-threaded')
assert all(isinstance(v, sparse.csr_matrix) for v in values)
| gpl-3.0 | 8,126,485,098,563,362,000 | 29.306061 | 94 | 0.520393 | false |
TaskEvolution/Task-Coach-Evolution | taskcoach/tests/unittests/thirdPartySoftwareTests/APSchedulerTest.py | 1 | 1826 | '''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <[email protected]>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from taskcoachlib.domain import date
from taskcoachlib.thirdparty import apscheduler
import test
class APSchedulerTest(test.TestCase):
def setUp(self):
super(APSchedulerTest, self).setUp()
self.jobCalled = 0
self.scheduler = apscheduler.scheduler.Scheduler()
self.scheduler.start()
def job(self):
self.jobCalled += 1
def testScheduleJob(self):
self.scheduler.add_date_job(self.job, date.Now() + date.TimeDelta(microseconds=650), misfire_grace_time=0)
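        # Busy-wait until the scheduler's background thread has run the job once.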
while self.jobCalled == 0:
pass
self.assertEqual(1, self.jobCalled)
def testScheduleJobInThePastRaisesValueError(self):
self.assertRaises(ValueError,
lambda: self.scheduler.add_date_job(self.job, date.Now() - date.TimeDelta(microseconds=500)))
def testScheduleJobWithInterval(self):
self.scheduler.add_interval_job(self.job, seconds=0.01)
while self.jobCalled < 2:
pass
self.assertEqual(2, self.jobCalled)
self.scheduler.unschedule_func(self.job)
| gpl-3.0 | -2,661,469,799,451,025,000 | 36.265306 | 119 | 0.708105 | false |
clouserw/zamboni | sites/stage/settings_mkt.py | 1 | 5626 | """private_mkt will be populated from puppet and placed in this directory"""
from mkt.settings import * # noqa
from settings_base import * # noqa
import private_mkt
DOMAIN = 'marketplace.allizom.org'
SERVER_EMAIL = '[email protected]'
DOMAIN = "marketplace.allizom.org"
SITE_URL = 'https://marketplace.allizom.org'
BROWSERID_AUDIENCES = [SITE_URL]
STATIC_URL = os.getenv('CUSTOM_CDN', 'https://marketplace-cdn.allizom.org/')
LOCAL_MIRROR_URL = '%s_files' % STATIC_URL
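# STATIC_URL carries a trailing slash; drop it so the bare CDN origin is added to the script-src whitelist.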
CSP_SCRIPT_SRC = CSP_SCRIPT_SRC + (STATIC_URL[:-1],)
ADDON_ICON_URL = 'img/uploads/addon_icons/%s/%s-%s.png?modified=%s'
PREVIEW_THUMBNAIL_URL = 'img/uploads/previews/thumbs/%s/%d.png?modified=%d'
PREVIEW_FULL_URL = 'img/uploads/previews/full/%s/%d.%s?modified=%d'
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN
MEDIA_URL = STATIC_URL + 'media/'
CACHE_PREFIX = 'stage.mkt.%s' % CACHE_PREFIX
CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_PREFIX
CACHES['default']['KEY_PREFIX'] = CACHE_PREFIX
CACHE_MACHINE_ENABLED = False
LOG_LEVEL = logging.DEBUG
# The django statsd client to use, see django-statsd for more.
# STATSD_CLIENT = 'django_statsd.clients.moz_heka'
SYSLOG_TAG = "http_app_mkt_stage"
SYSLOG_TAG2 = "http_app_mkt_stage_timer"
SYSLOG_CSP = "http_app_mkt_stage_csp"
STATSD_PREFIX = 'marketplace-stage'
# Celery
BROKER_URL = private_mkt.BROKER_URL
CELERY_ALWAYS_EAGER = False
CELERY_IGNORE_RESULT = True
CELERY_DISABLE_RATE_LIMITS = True
CELERYD_PREFETCH_MULTIPLIER = 1
WEBAPPS_RECEIPT_KEY = private_mkt.WEBAPPS_RECEIPT_KEY
WEBAPPS_RECEIPT_URL = private_mkt.WEBAPPS_RECEIPT_URL
WEBAPPS_UNIQUE_BY_DOMAIN = True
SENTRY_DSN = private_mkt.SENTRY_DSN
SOLITUDE_HOSTS = ('https://payments.allizom.org',)
SOLITUDE_OAUTH = {'key': private_mkt.SOLITUDE_OAUTH_KEY,
'secret': private_mkt.SOLITUDE_OAUTH_SECRET}
WEBAPPS_PUBLIC_KEY_DIRECTORY = NETAPP_STORAGE + '/public_keys'
PRODUCT_ICON_PATH = NETAPP_STORAGE + '/product-icons'
DUMPED_APPS_PATH = NETAPP_STORAGE + '/dumped-apps'
DUMPED_USERS_PATH = NETAPP_STORAGE + '/dumped-users'
GOOGLE_ANALYTICS_DOMAIN = 'marketplace.firefox.com'
VALIDATOR_TIMEOUT = 180
VALIDATOR_IAF_URLS = ['https://marketplace.firefox.com',
'https://marketplace.allizom.org',
'https://marketplace-dev.allizom.org',
'https://marketplace-altdev.allizom.org']
if getattr(private_mkt, 'LOAD_TESTING', False):
# mock the authentication and use django_fakeauth for this
AUTHENTICATION_BACKENDS = (
('django_fakeauth.FakeAuthBackend',) + AUTHENTICATION_BACKENDS)
MIDDLEWARE_CLASSES.insert(
MIDDLEWARE_CLASSES.index('mkt.access.middleware.ACLMiddleware'),
'django_fakeauth.FakeAuthMiddleware')
FAKEAUTH_TOKEN = private_mkt.FAKEAUTH_TOKEN
# we are also creating access tokens for OAuth, here are the keys and
# secrets used for them
API_PASSWORD = getattr(private_mkt, 'API_PASSWORD', FAKEAUTH_TOKEN)
AMO_LANGUAGES = AMO_LANGUAGES + ('dbg', 'rtl', 'ar', 'ee', 'ff', 'ha', 'ig',
'ln', 'sw', 'tl', 'wo', 'yo')
LANGUAGES = lazy(langs, dict)(AMO_LANGUAGES)
LANGUAGE_URL_MAP = dict([(i.lower(), i) for i in AMO_LANGUAGES])
# Bug 748403
SIGNING_SERVER = private_mkt.SIGNING_SERVER
SIGNING_SERVER_ACTIVE = True
SIGNING_VALID_ISSUERS = ['marketplace-cdn.allizom.org']
# Bug 793876
SIGNED_APPS_KEY = private_mkt.SIGNED_APPS_KEY
SIGNED_APPS_SERVER_ACTIVE = True
SIGNED_APPS_SERVER = private_mkt.SIGNED_APPS_SERVER
SIGNED_APPS_REVIEWER_SERVER_ACTIVE = True
SIGNED_APPS_REVIEWER_SERVER = private_mkt.SIGNED_APPS_REVIEWER_SERVER
# See mkt/settings.py for more info.
APP_PURCHASE_KEY = DOMAIN
APP_PURCHASE_AUD = DOMAIN
APP_PURCHASE_TYP = 'mozilla-stage/payments/pay/v1'
APP_PURCHASE_SECRET = private_mkt.APP_PURCHASE_SECRET
MONOLITH_PASSWORD = private_mkt.MONOLITH_PASSWORD
# This is mainly for Marionette tests.
WEBAPP_MANIFEST_NAME = 'Marketplace Stage'
ENABLE_API_ERROR_SERVICE = True
NEWRELIC_INI = '/etc/newrelic.d/marketplace.allizom.org.ini'
ES_DEFAULT_NUM_REPLICAS = 2
ES_USE_PLUGINS = True
BANGO_BASE_PORTAL_URL = 'https://mozilla.bango.com/login/al.aspx?'
ALLOWED_CLIENTS_EMAIL_API = private_mkt.ALLOWED_CLIENTS_EMAIL_API
POSTFIX_AUTH_TOKEN = private_mkt.POSTFIX_AUTH_TOKEN
POSTFIX_DOMAIN = DOMAIN
MONOLITH_INDEX = 'mktstage-time_*'
# IARC content ratings.
IARC_ENV = 'test'
IARC_MOCK = False
IARC_PASSWORD = private_mkt.IARC_PASSWORD
IARC_PLATFORM = 'Firefox'
IARC_SERVICE_ENDPOINT = 'https://www.globalratings.com/IARCDEMOService/IARCServices.svc' # noqa
IARC_STOREFRONT_ID = 4
IARC_SUBMISSION_ENDPOINT = 'https://www.globalratings.com/IARCDEMORating/Submission.aspx' # noqa
IARC_ALLOW_CERT_REUSE = True
PRE_GENERATE_APKS = True
PRE_GENERATE_APK_URL = \
'https://apk-controller.stage.mozaws.net/application.apk'
BOKU_SIGNUP_URL = 'https://merchants.boku.com/signup/signup_business?params=jEHWaTM7zm5cbPpheT2iS4xB1mkzO85uxVAo7rs7LVgy5JYGMWnUYDvxyEk8lxalP1pJZFv5d9oI%0A9bcXqxv0MQ%3D%3D' # noqa
FXA_AUTH_DOMAIN = 'api.accounts.firefox.com'
FXA_OAUTH_URL = 'https://oauth.accounts.firefox.com'
FXA_CLIENT_ID = getattr(private_mkt, 'FXA_CLIENT_ID', '')
FXA_CLIENT_SECRET = getattr(private_mkt, 'FXA_CLIENT_SECRET', '')
FXA_SECRETS = {
FXA_CLIENT_ID: FXA_CLIENT_SECRET,
}
DEFAULT_PAYMENT_PROVIDER = 'bango'
PAYMENT_PROVIDERS = ['bango', 'boku']
RECOMMENDATIONS_API_URL = 'https://recommend.allizom.org'
RECOMMENDATIONS_ENABLED = True
QA_APP_ID = 500427
DEV_PAY_PROVIDERS = {
APP_PURCHASE_TYP: SITE_URL + '/mozpay/?req={jwt}',
}
# Bug 1145338
IAF_OVERRIDE_APPS = private_mkt.IAF_OVERRIDE_APPS
| bsd-3-clause | 5,491,280,167,152,898,000 | 31.709302 | 180 | 0.724849 | false |
h4ck3rm1k3/pywikibot-core | pywikibot/xmlreader.py | 3 | 6434 | # -*- coding: utf-8 -*-
"""
XML reading module.
Each XmlEntry object represents a page, as read from an XML source
The XmlDump class reads a pages_current XML dump (like the ones offered on
https://dumps.wikimedia.org/backup-index.html) and offers a generator over
XmlEntry objects which can be used by other bots.
"""
#
# (C) Pywikibot team, 2005-2013
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import re
import threading
from xml.etree.cElementTree import iterparse
import xml.sax
from pywikibot.tools import open_archive
def parseRestrictions(restrictions):
"""
Parse the characters within a restrictions tag.
Returns strings representing user groups allowed to edit and
to move a page, where None means there are no restrictions.
"""
if not restrictions:
return None, None
editRestriction = None
moveRestriction = None
editLockMatch = re.search('edit=([^:]*)', restrictions)
if editLockMatch:
editRestriction = editLockMatch.group(1)
moveLockMatch = re.search('move=([^:]*)', restrictions)
if moveLockMatch:
moveRestriction = moveLockMatch.group(1)
if restrictions == 'sysop':
editRestriction = 'sysop'
moveRestriction = 'sysop'
return editRestriction, moveRestriction
class XmlEntry(object):
"""Represent a page."""
def __init__(self, title, ns, id, text, username, ipedit, timestamp,
editRestriction, moveRestriction, revisionid, comment,
redirect):
"""Constructor."""
# TODO: there are more tags we can read.
self.title = title
self.ns = ns
self.id = id
self.text = text
self.username = username.strip()
self.ipedit = ipedit
self.timestamp = timestamp
self.editRestriction = editRestriction
self.moveRestriction = moveRestriction
self.revisionid = revisionid
self.comment = comment
self.isredirect = redirect
class XmlParserThread(threading.Thread):
"""
XML parser that will run as a single thread.
This allows the XmlDump
generator to yield pages before the parser has finished reading the
entire dump.
There surely are more elegant ways to do this.
"""
def __init__(self, filename, handler):
"""Constructor."""
threading.Thread.__init__(self)
self.filename = filename
self.handler = handler
def run(self):
"""Parse the file in a single thread."""
xml.sax.parse(self.filename, self.handler)
class XmlDump(object):
"""
Represents an XML dump file.
Reads the local file at initialization,
parses it, and offers access to the resulting XmlEntries via a generator.
@param allrevisions: boolean
If True, parse all revisions instead of only the latest one.
Default: False.
"""
def __init__(self, filename, allrevisions=False):
"""Constructor."""
self.filename = filename
if allrevisions:
self._parse = self._parse_all
else:
self._parse = self._parse_only_latest
def parse(self):
"""Generator using cElementTree iterparse function."""
with open_archive(self.filename) as source:
# iterparse's event must be a str but they are unicode with
# unicode_literals in Python 2
context = iterparse(source, events=(str('start'), str('end'),
str('start-ns')))
self.root = None
for event, elem in context:
if event == "start-ns" and elem[0] == "":
self.uri = elem[1]
continue
if event == "start" and self.root is None:
self.root = elem
continue
for rev in self._parse(event, elem):
yield rev
def _parse_only_latest(self, event, elem):
"""Parser that yields only the latest revision."""
if event == "end" and elem.tag == "{%s}page" % self.uri:
self._headers(elem)
revision = elem.find("{%s}revision" % self.uri)
yield self._create_revision(revision)
elem.clear()
self.root.clear()
def _parse_all(self, event, elem):
"""Parser that yields all revisions."""
if event == "start" and elem.tag == "{%s}page" % self.uri:
self._headers(elem)
if event == "end" and elem.tag == "{%s}revision" % self.uri:
yield self._create_revision(elem)
elem.clear()
self.root.clear()
def _headers(self, elem):
"""Extract headers from XML chunk."""
self.title = elem.findtext("{%s}title" % self.uri)
self.ns = elem.findtext("{%s}ns" % self.uri)
self.pageid = elem.findtext("{%s}id" % self.uri)
self.restrictions = elem.findtext("{%s}restrictions" % self.uri)
self.isredirect = elem.findtext("{%s}redirect" % self.uri) is not None
self.editRestriction, self.moveRestriction = parseRestrictions(
self.restrictions)
def _create_revision(self, revision):
"""Create a Single revision."""
revisionid = revision.findtext("{%s}id" % self.uri)
timestamp = revision.findtext("{%s}timestamp" % self.uri)
comment = revision.findtext("{%s}comment" % self.uri)
contributor = revision.find("{%s}contributor" % self.uri)
ipeditor = contributor.findtext("{%s}ip" % self.uri)
username = ipeditor or contributor.findtext("{%s}username" % self.uri)
# could get comment, minor as well
text = revision.findtext("{%s}text" % self.uri)
return XmlEntry(title=self.title,
ns=self.ns,
id=self.pageid,
text=text or u'',
username=username or u'', # username might be deleted
ipedit=bool(ipeditor),
timestamp=timestamp,
editRestriction=self.editRestriction,
moveRestriction=self.moveRestriction,
revisionid=revisionid,
comment=comment,
redirect=self.isredirect
)
| mit | 6,881,866,051,588,237,000 | 32.863158 | 78 | 0.587193 | false |
ooici/marine-integrations | mi/dataset/driver/mflm/dosta/driver.py | 1 | 6129 | """
@package mi.dataset.driver.mflm.dosta.driver
@file marine-integrations/mi/dataset/driver/mflm/dosta/driver.py
@author Emily Hahn
@brief Driver for the mflm_dosta
Release notes:
initial release
"""
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import string
from mi.core.common import BaseEnum
from mi.core.exceptions import ConfigurationException
from mi.core.log import get_logger ; log = get_logger()
from mi.dataset.harvester import SingleFileHarvester, SingleDirectoryHarvester
from mi.dataset.dataset_driver import HarvesterType, DataSetDriverConfigKeys
from mi.dataset.driver.sio_mule.sio_mule_driver import SioMuleDataSetDriver
from mi.dataset.parser.dostad import \
DostadParser, \
DostadRecoveredParser, \
StateKey, \
METADATA_PARTICLE_CLASS_KEY, \
DATA_PARTICLE_CLASS_KEY, \
DostadParserRecoveredDataParticle, \
DostadParserTelemeteredDataParticle, \
DostadParserRecoveredMetadataDataParticle, \
DostadParserTelemeteredMetadataDataParticle
class DataTypeKey(BaseEnum):
"""
These are the possible harvester/parser pairs for this driver
"""
DOSTA_ABCDJM_SIO_TELEMETERED = 'dosta_abcdjm_sio_telemetered'
DOSTA_ABCDJM_SIO_RECOVERED = 'dosta_abcdjm_sio_recovered'
class MflmDOSTADDataSetDriver(SioMuleDataSetDriver):
@classmethod
def stream_config(cls):
# Fill in below with particle stream
return [DostadParserRecoveredDataParticle.type(),
DostadParserTelemeteredDataParticle.type(),
DostadParserRecoveredMetadataDataParticle.type(),
DostadParserTelemeteredMetadataDataParticle.type()]
def __init__(self, config, memento, data_callback, state_callback, event_callback, exception_callback):
# link the data keys to the harvester type, multiple or single file harvester
harvester_type = {
DataTypeKey.DOSTA_ABCDJM_SIO_TELEMETERED: HarvesterType.SINGLE_FILE,
DataTypeKey.DOSTA_ABCDJM_SIO_RECOVERED: HarvesterType.SINGLE_DIRECTORY
}
super(MflmDOSTADDataSetDriver, self).__init__(config,
memento,
data_callback,
state_callback,
event_callback,
exception_callback,
DataTypeKey.list(),
harvester_type=harvester_type)
def _build_parser(self, parser_state, stream_in, data_key=None):
"""
Build and return the parser
"""
config = self._parser_config.get(data_key)
#
# If the key is DOSTA_ABCDJM_SIO_RECOVERED, build the WFP parser.
#
if data_key == DataTypeKey.DOSTA_ABCDJM_SIO_RECOVERED:
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dostad',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: DostadParserRecoveredMetadataDataParticle,
DATA_PARTICLE_CLASS_KEY: DostadParserRecoveredDataParticle
}
})
parser = DostadRecoveredParser(
config,
parser_state,
stream_in,
lambda state, ingested: self._save_parser_state(state, data_key, ingested),
self._data_callback,
self._sample_exception_callback)
return parser
#
        # If the key is DOSTA_ABCDJM_SIO_TELEMETERED, build the telemetered DOSTA parser.
#
elif data_key == DataTypeKey.DOSTA_ABCDJM_SIO_TELEMETERED:
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.dostad',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
METADATA_PARTICLE_CLASS_KEY: DostadParserTelemeteredMetadataDataParticle,
DATA_PARTICLE_CLASS_KEY: DostadParserTelemeteredDataParticle
}
})
parser = DostadParser(
config,
parser_state,
stream_in,
lambda state: self._save_parser_state(state, data_key),
self._data_callback,
self._sample_exception_callback)
return parser
#
# If the key is one that we're not expecting, don't build any parser.
#
else:
raise ConfigurationException("Invalid data_key supplied to build parser")
def _build_harvester(self, driver_state):
"""
Build the harvester
@param driver_state The starting driver state
"""
self._harvester = []
if DataTypeKey.DOSTA_ABCDJM_SIO_TELEMETERED in self._harvester_config:
telemetered_harvester = SingleFileHarvester(
self._harvester_config.get(DataTypeKey.DOSTA_ABCDJM_SIO_TELEMETERED),
driver_state[DataTypeKey.DOSTA_ABCDJM_SIO_TELEMETERED],
lambda file_state: self._file_changed_callback(file_state, DataTypeKey.DOSTA_ABCDJM_SIO_TELEMETERED),
self._exception_callback
)
self._harvester.append(telemetered_harvester)
else:
log.warn('No configuration for telemetered harvester, not building')
if DataTypeKey.DOSTA_ABCDJM_SIO_RECOVERED in self._harvester_config:
recovered_harvester = SingleDirectoryHarvester(
self._harvester_config.get(DataTypeKey.DOSTA_ABCDJM_SIO_RECOVERED),
driver_state[DataTypeKey.DOSTA_ABCDJM_SIO_RECOVERED],
lambda filename: self._new_file_callback(filename, DataTypeKey.DOSTA_ABCDJM_SIO_RECOVERED),
lambda modified: self._modified_file_callback(modified, DataTypeKey.DOSTA_ABCDJM_SIO_RECOVERED),
self._exception_callback
)
self._harvester.append(recovered_harvester)
else:
log.warn('No configuration for recovered harvester, not building')
return self._harvester
| bsd-2-clause | 1,925,026,545,317,230,000 | 38.541935 | 117 | 0.640561 | false |
r-mibu/ceilometer | ceilometer/tests/network/services/test_vpnaas.py | 11 | 7391 | #
# Copyright 2014 Cisco Systems,Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_context import context
from oslotest import base
from oslotest import mockpatch
from ceilometer.agent import manager
from ceilometer.agent import plugin_base
from ceilometer.network.services import discovery
from ceilometer.network.services import vpnaas
class _BaseTestVPNPollster(base.BaseTestCase):
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def setUp(self):
super(_BaseTestVPNPollster, self).setUp()
self.addCleanup(mock.patch.stopall)
self.context = context.get_admin_context()
self.manager = manager.AgentManager()
plugin_base._get_keystone = mock.Mock()
plugin_base._get_keystone.service_catalog.get_endpoints = (
mock.MagicMock(return_value={'network': mock.ANY}))
class TestVPNServicesPollster(_BaseTestVPNPollster):
def setUp(self):
super(TestVPNServicesPollster, self).setUp()
self.pollster = vpnaas.VPNServicesPollster()
fake_vpn = self.fake_get_vpn_service()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'vpn_get_all',
return_value=fake_vpn))
@staticmethod
def fake_get_vpn_service():
return [{'status': 'ACTIVE',
'name': 'myvpn',
'description': '',
'admin_state_up': True,
'id': 'fdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'},
{'status': 'INACTIVE',
'name': 'myvpn',
'description': '',
'admin_state_up': True,
'id': 'cdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'},
{'status': 'PENDING_CREATE',
'name': 'myvpn',
'description': '',
'id': 'bdde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
'admin_state_up': True,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'},
{'status': 'error',
'name': 'myvpn',
'description': '',
'id': 'edde3d818-fdcb-fg4b-de7f-6750dc8a9d7a',
'admin_state_up': False,
'subnet_id': 'bbe3d818-bdcb-4e4b-b47f-5650dc8a9d7a',
'tenant_id': 'a4eb9f4938bb418bbc4f8eb31802fefa',
'router_id': 'ade3d818-fdcb-fg4b-de7f-6750dc8a9d7a'},
]
def test_vpn_get_samples(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_vpn_service()))
self.assertEqual(3, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_vpn_service()[0][field],
samples[0].resource_metadata[field])
def test_vpn_volume(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_vpn_service()))
self.assertEqual(1, samples[0].volume)
self.assertEqual(0, samples[1].volume)
self.assertEqual(2, samples[2].volume)
def test_get_vpn_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_vpn_service()))
self.assertEqual(set(['network.services.vpn']),
set([s.name for s in samples]))
def test_vpn_discovery(self):
discovered_vpns = discovery.VPNServicesDiscovery().discover(
self.manager)
self.assertEqual(3, len(discovered_vpns))
for vpn in self.fake_get_vpn_service():
if vpn['status'] == 'error':
self.assertNotIn(vpn, discovered_vpns)
else:
self.assertIn(vpn, discovered_vpns)
class TestIPSecConnectionsPollster(_BaseTestVPNPollster):
def setUp(self):
super(TestIPSecConnectionsPollster, self).setUp()
self.pollster = vpnaas.IPSecConnectionsPollster()
fake_conns = self.fake_get_ipsec_connections()
self.useFixture(mockpatch.Patch('ceilometer.neutron_client.Client.'
'ipsec_site_connections_get_all',
return_value=fake_conns))
@staticmethod
def fake_get_ipsec_connections():
return [{'name': 'connection1',
'description': 'Remote-connection1',
'peer_address': '192.168.1.10',
'peer_id': '192.168.1.10',
'peer_cidrs': ['192.168.2.0/24',
'192.168.3.0/24'],
'mtu': 1500,
'psk': 'abcd',
'initiator': 'bi-directional',
'dpd': {
'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': 'ade3d818-fdcb-fg4b-de7f-4550dc8a9d7a',
'ipsecpolicy_id': 'fce3d818-fdcb-fg4b-de7f-7850dc8a9d7a',
'vpnservice_id': 'dce3d818-fdcb-fg4b-de7f-5650dc8a9d7a',
'admin_state_up': True,
'tenant_id': 'abe3d818-fdcb-fg4b-de7f-6650dc8a9d7a',
'id': 'fdfbcec-fdcb-fg4b-de7f-6650dc8a9d7a'}
]
def test_conns_get_samples(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_ipsec_connections()))
self.assertEqual(1, len(samples))
for field in self.pollster.FIELDS:
self.assertEqual(self.fake_get_ipsec_connections()[0][field],
samples[0].resource_metadata[field])
def test_get_conns_meter_names(self):
samples = list(self.pollster.get_samples(
self.manager, {},
resources=self.fake_get_ipsec_connections()))
self.assertEqual(set(['network.services.vpn.connections']),
set([s.name for s in samples]))
def test_conns_discovery(self):
discovered_conns = discovery.IPSecConnectionsDiscovery().discover(
self.manager)
self.assertEqual(1, len(discovered_conns))
self.assertEqual(self.fake_get_ipsec_connections(), discovered_conns)
| apache-2.0 | 5,796,234,400,559,251,000 | 41.234286 | 77 | 0.575159 | false |
secynic/nfsinkhole | nfsinkhole/iptables.py | 1 | 13411 | # Copyright (c) 2016-2017 Philip Hane
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .exceptions import (IPTablesError, IPTablesExists, IPTablesNotExists,
SubprocessError)
from .utils import popen_wrapper
import logging
log = logging.getLogger(__name__)
class IPTablesSinkhole:
"""
The class for managing sinkhole configuration within iptables.
Args:
interface: The secondary network interface dedicated to sinkhole
traffic.
Warning: Do not accidentally set this to your primary interface.
It will drop all traffic, and kill your remote access.
interface_addr: The IP address assigned to interface.
log_prefix: Prefix for syslog messages.
protocol: The protocol(s) to log (all traffic will still be dropped).
Accepts a comma separated string of protocols
(tcp,udp,udplite,icmp,esp,ah,sctp) or all.
dport: The destination port(s) to log (for applicable protocols).
Range should be in the format startport:endport or 0,1,2,3,n..
hashlimit: Set the hashlimit rate. Hashlimit is used to tune the
amount of events logged. See the iptables-extensions docs:
http://ipset.netfilter.org/iptables-extensions.man.html
hashlimitmode: Set the hashlimit mode, a comma separated string of
options (srcip,srcport,dstip,dstport). More options here results
in more logs generated.
hashlimitburst: Maximum initial number of packets to match.
hashlimitexpire: Number of milliseconds to keep entries in the hash
table.
srcexclude: Exclude a comma separated string of source IPs/CIDRs from
logging.
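
    Example (a hypothetical invocation; ``eth1`` and ``192.0.2.10`` are made
    up for illustration, and the calls shell out to iptables, so they require
    root/sudo and a dedicated sinkhole interface)::

        from nfsinkhole.iptables import IPTablesSinkhole

        sinkhole = IPTablesSinkhole(
            interface='eth1',
            interface_addr='192.0.2.10',
            protocol='tcp,udp',
            srcexclude='127.0.0.1,10.0.0.0/8',
        )
        sinkhole.create_rules()       # SINKHOLE chain: syslog + NFLOG logging
        sinkhole.create_drop_rule()   # then drop all I/O on the interface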
"""
def __init__(self, interface=None, interface_addr=None,
log_prefix='"[nfsinkhole] "',
protocol='all', dport='0:65535',
hashlimit='1/h', hashlimitmode='srcip,dstip,dstport',
hashlimitburst='1', hashlimitexpire='1800000',
srcexclude='127.0.0.1'
):
# TODO: add arg checks across all classes
self.interface = interface
self.interface_addr = interface_addr
self.log_prefix = log_prefix
self.protocol = protocol
self.dport = dport
self.hashlimit = hashlimit
self.hashlimitmode = hashlimitmode
self.hashlimitburst = hashlimitburst
self.hashlimitexpire = hashlimitexpire
self.srcexclude = srcexclude
def list_existing_rules(self, filter_io_drop=False):
"""
The function for retrieving current iptables rules related to
nfsinkhole.
Args:
filter_io_drop: Boolean for only showing the DROP rules for INPUT
and OUTPUT. These are not shown by default. This exists to
avoid allowing packets on the interface if the service is down.
If installed, the interface always drops all traffic regardless
of the service state.
Returns:
List: Matching sinkhole lines returned by iptables -S.
Raises:
IPTablesError: A Linux process had an error (stderr).
"""
# Get list summary of iptables rules
cmd = ['iptables', '-S']
existing = []
# Get all of the iptables rules
try:
out, err = popen_wrapper(cmd, sudo=True)
except OSError as e: # pragma: no cover
raise IPTablesError('Error encountered when running process "{0}":'
'\n{1}'.format(' '.join(cmd), e))
# If any errors, iterate them and write to log, then raise
# IPTablesError.
if err: # pragma: no cover
arr = err.splitlines()
raise IPTablesError('Error encountered when running process "{0}":'
'\n{1}'.format(' '.join(cmd), '\n'.join(arr)))
# Iterate the iptables rules, only grabbing nfsinkhole related rules.
arr = out.splitlines()
for line in arr:
tmp_line = line.decode('ascii', 'ignore')
            if ('SINKHOLE' in tmp_line) or (
                    filter_io_drop and tmp_line.strip() in [
                        '-A INPUT -i {0} -j DROP'.format(self.interface),
                        '-A OUTPUT -o {0} -j DROP'.format(self.interface)
                    ]):
existing.append(tmp_line.strip())
return existing
def create_rules(self):
"""
The function for writing iptables rules related to nfsinkhole.
"""
log.info('Checking for existing iptables rules.')
existing = self.list_existing_rules()
# Existing sinkhole related iptables lines found, can't create.
if len(existing) > 0:
raise IPTablesExists('Existing iptables rules found for '
'nfsinkhole:\n{0}'
''.format('\n'.join(existing)))
log.info('Writing iptables config')
# Create a new iptables chain for logging
tmp_arr = ['iptables', '-N', 'SINKHOLE']
log.info('Writing: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
# Exclude IPs/CIDRs from logging (scanners, monitoring, pen-testers,
# etc):
for addr in self.srcexclude.split(','):
tmp_arr = [
'iptables',
'-A', 'SINKHOLE',
'-s', addr,
'-j', 'RETURN'
]
log.info('Writing: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
# Tell the chain to log and use the prefix self.log_prefix:
tmp_arr = [
'iptables',
'-A', 'SINKHOLE',
'-j', 'LOG',
'--log-prefix', self.log_prefix
]
log.info('Writing: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
# Tell the chain to also log to netfilter (for packet capture):
tmp_arr = ['iptables', '-A', 'SINKHOLE',
'-j', 'NFLOG']
log.info('Writing: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
# Tell the chain to trigger on hashlimit and protocol/port settings
tmp_arr = [
'iptables',
'-i', self.interface,
'-d', self.interface_addr,
'-j', 'SINKHOLE',
'-m', 'hashlimit',
'--hashlimit', self.hashlimit,
'--hashlimit-burst', self.hashlimitburst,
'--hashlimit-mode', self.hashlimitmode,
'--hashlimit-name', 'sinkhole',
'--hashlimit-htable-expire', self.hashlimitexpire,
'-I', 'INPUT', '1'
]
# if --protocol filtered, set mode to multiport with protocol, and
# set destination port if provided and applicable to the protocol(s)
if self.protocol != 'all':
tmp_arr += ['-m', 'multiport', '--protocol', self.protocol]
if self.dport != '0:65535':
tmp_arr += ['--dport', self.dport]
log.info('Writing: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
def create_drop_rule(self):
"""
The function for writing the iptables DROP rule for the interface.
"""
log.info('Checking for existing iptables DROP rules.')
existing = self.list_existing_rules(filter_io_drop=True)
# Existing sinkhole related iptables lines found, can't create.
if len(existing) > 0:
for line in existing:
if line in (
'-A INPUT -i {0} -j DROP'.format(self.interface),
'-A OUTPUT -o {0} -j DROP'.format(self.interface)
):
raise IPTablesExists('Existing iptables DROP rules found '
'for nfsinkhole:\n{0}'
''.format(line))
log.info('Writing iptables DROP config')
# Create rules to drop all I/O traffic:
tmp_arr = [
'iptables',
'-i', self.interface,
'-j', 'DROP',
'-I', 'INPUT', '1'
]
log.info('Writing: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
tmp_arr = [
'iptables',
'-o', self.interface,
'-j', 'DROP',
'-I', 'OUTPUT', '1'
]
log.info('Writing: {0}'.format(' '.join(tmp_arr)))
# TODO: replicate exception handling to other subprocess calls
try:
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
except SubprocessError as e: # pragma: no cover
raise IPTablesError(e)
def delete_rules(self):
"""
The function for deleting iptables rules related to nfsinkhole.
"""
log.info('Checking for existing iptables rules.')
existing = self.list_existing_rules()
# No sinkhole related iptables lines found.
if len(existing) == 0:
raise IPTablesNotExists('No existing rules found.')
log.info('Deleting iptables config (only what was created)')
# Iterate all of the active sinkhole related iptables lines
flush = False
for line in existing:
if '-A SINKHOLE' in line or line == '-N SINKHOLE':
# Don't try to delete the SINKHOLE chain yet, it needs to be
# empty. Set flush to clear it after this loop.
flush = True
elif line not in (
'-A INPUT -i {0} -j DROP'.format(self.interface),
'-A OUTPUT -o {0} -j DROP'.format(self.interface)
):
# Delete a single line (not the SINKHOLE chain itself).
stmt = line.replace('-A', '-D').strip().split(' ')
tmp_arr = ['iptables'] + stmt
log.info('Deleting: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
# The SINKHOLE chain was detected. Remove it.
if flush:
# All lines in the SINKHOLE chain should have been flushed already,
# but run a flush to be sure.
tmp_arr = ['iptables', '-F', 'SINKHOLE']
log.info('Flushing: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
# Now that the SINKHOLE chain has been flushed, we can delete it.
tmp_arr = ['iptables', '-X', 'SINKHOLE']
log.info('Deleting: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
# Return a list of matching lines.
return len(existing)
def delete_drop_rule(self):
"""
The function for deleting the iptables DROP rule for the interface.
"""
log.info('Checking for existing iptables DROP rules.')
existing = self.list_existing_rules(filter_io_drop=True)
# No sinkhole related iptables lines found.
if len(existing) == 0:
raise IPTablesNotExists('No existing rules found.')
log.info('Deleting iptables DROP config.')
count = 0
for line in existing:
if line in (
'-A INPUT -i {0} -j DROP'.format(self.interface),
'-A OUTPUT -o {0} -j DROP'.format(self.interface)
):
count += 1
# Delete a single line (not the SINKHOLE chain itself).
stmt = line.replace('-A', '-D').split(' ')
tmp_arr = ['iptables'] + stmt
log.info('Deleting: {0}'.format(' '.join(tmp_arr)))
popen_wrapper(cmd_arr=tmp_arr, raise_err=True, sudo=True)
# Return the number of matching lines.
return count
| bsd-2-clause | 5,636,203,261,590,857,000 | 36.356546 | 79 | 0.575498 | false |
JPJPJPOPOP/zulip | zerver/webhooks/helloworld/tests.py | 37 | 1526 | # -*- coding: utf-8 -*-
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
class HelloWorldHookTests(WebhookTestCase):
STREAM_NAME = 'test'
URL_TEMPLATE = "/api/v1/external/helloworld?&api_key={api_key}"
FIXTURE_DIR_NAME = 'hello'
    # Note: Include a test function for each distinct message condition your integration supports
def test_hello_message(self):
# type: () -> None
expected_subject = u"Hello World"
expected_message = u"Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Marilyn Monroe](https://en.wikipedia.org/wiki/Marilyn_Monroe)**"
# use fixture named helloworld_hello
self.send_and_test_stream_message('hello', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def test_goodbye_message(self):
# type: () -> None
expected_subject = u"Hello World"
expected_message = u"Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Goodbye](https://en.wikipedia.org/wiki/Goodbye)**"
# use fixture named helloworld_goodbye
self.send_and_test_stream_message('goodbye', expected_subject, expected_message,
content_type="application/x-www-form-urlencoded")
def get_body(self, fixture_name):
# type: (Text) -> Text
return self.fixture_data("helloworld", fixture_name, file_type="json")
| apache-2.0 | -4,778,303,683,667,878,000 | 48.225806 | 179 | 0.651376 | false |
akash1808/nova | nova/tests/unit/virt/test_block_device.py | 11 | 36872 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo_serialization import jsonutils
import six
from nova import block_device
from nova import context
from nova import exception
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
'image': driver_block_device.DriverImageBlockDevice,
'blank': driver_block_device.DriverBlankBlockDevice
}
swap_bdm = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
snapshot_bdm = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
snapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
snapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
image_bdm = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
image_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
image_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
blank_bdm = block_device.BlockDeviceDict(
{'id': 6, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'blank',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1})
blank_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1}
blank_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
self.assertRaises(driver_block_device._NotTransformable,
cls, {'no_device': True})
def _test_driver_device(self, name):
db_bdm = getattr(self, "%s_bdm" % name)
test_bdm = self.driver_classes[name](db_bdm)
self.assertThat(test_bdm, matchers.DictMatches(
getattr(self, "%s_driver_bdm" % name)))
for k, v in six.iteritems(db_bdm):
field_val = getattr(test_bdm._bdm_obj, k)
if isinstance(field_val, bool):
v = bool(v)
self.assertEqual(field_val, v)
self.assertThat(test_bdm.legacy(),
matchers.DictMatches(
getattr(self, "%s_legacy_driver_bdm" % name)))
# Test passthru attributes
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
        # Make sure that all others raise _InvalidType
for other_name, cls in six.iteritems(self.driver_classes):
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
# Test the save method
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
for fld, alias in six.iteritems(test_bdm._update_on_save):
test_bdm[alias or fld] = 'fake_changed_value'
test_bdm.save()
for fld, alias in six.iteritems(test_bdm._update_on_save):
self.assertEqual(test_bdm[alias or fld],
getattr(test_bdm._bdm_obj, fld))
save_mock.assert_called_once_with()
def check_save():
self.assertEqual(set([]), test_bdm._bdm_obj.obj_what_changed())
# Test that nothing is set on the object if there are no actual changes
test_bdm._bdm_obj.obj_reset_changes()
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
save_mock.side_effect = check_save
test_bdm.save()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](no_size_bdm)
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("snapshot")
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('image')
test_bdm = self.driver_classes['image'](
self.image_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('image')
bdm = self.image_bdm.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
def test_driver_blank_block_device(self):
self._test_driver_device('blank')
test_bdm = self.driver_classes['blank'](
self.blank_bdm)
self.assertEqual(6, test_bdm._bdm_obj.id)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.assertEqual(3, test_bdm.volume_size)
def _test_call_wait_func(self, delete_on_termination, delete_fail=False):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['delete_on_termination'] = delete_on_termination
with mock.patch.object(self.volume_api, 'delete') as vol_delete:
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
if delete_on_termination and delete_fail:
vol_delete.side_effect = Exception()
self.assertRaises(exception.VolumeNotCreated,
test_bdm._call_wait_func,
context=self.context,
wait_func=wait_func,
volume_api=self.volume_api,
volume_id='fake-id')
self.assertEqual(delete_on_termination, vol_delete.called)
def test_call_wait_delete_volume(self):
self._test_call_wait_func(True)
def test_call_wait_delete_volume_fail(self):
self._test_call_wait_func(True, True)
def test_call_wait_no_delete_volume(self):
self._test_call_wait_func(False)
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, check_attach=True,
fail_check_attach=False, driver_attach=False,
fail_driver_attach=False, volume_attach=True,
fail_volume_attach=False, access_mode='rw'):
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
instance_detail = {'id': '123', 'uuid': 'fake_uuid'}
instance = fake_instance.fake_instance_obj(self.context,
**instance_detail)
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'access_mode': access_mode}}
expected_conn_info = {'data': {'access_mode': access_mode},
'serial': fake_volume['id']}
enc_data = {'fake': 'enc_data'}
self.volume_api.get(self.context,
fake_volume['id']).AndReturn(fake_volume)
if check_attach:
if not fail_check_attach:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndReturn(None)
else:
self.volume_api.check_attach(self.context, fake_volume,
instance=instance).AndRaise(
test.TestingException)
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(connection_info)
if driver_attach:
encryptors.get_encryption_metadata(
elevated_context, self.volume_api, fake_volume['id'],
connection_info).AndReturn(enc_data)
if not fail_driver_attach:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndReturn(None)
else:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndRaise(test.TestingException)
self.volume_api.terminate_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(None)
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
if volume_attach:
driver_bdm._bdm_obj.save().AndReturn(None)
if not fail_volume_attach:
self.volume_api.attach(elevated_context, fake_volume['id'],
'fake_uuid', bdm_dict['device_name'],
mode=access_mode).AndReturn(None)
else:
self.volume_api.attach(elevated_context, fake_volume['id'],
'fake_uuid', bdm_dict['device_name'],
mode=access_mode).AndRaise(
test.TestingException)
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_check_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
def test_volume_no_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=False)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=False)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
driver_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_check_attach=False, do_driver_attach=True)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=True,
fail_driver_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
def test_volume_attach_volume_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=True,
fail_volume_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
def test_refresh_connection(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
test_bdm._bdm_obj.save().AndReturn(None)
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.snapshot_bdm.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3, '', '', snapshot,
availability_zone=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_fail_volume(self):
fail_volume_snapshot = self.snapshot_bdm.copy()
fail_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](fail_volume_snapshot)
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': 'fake-uuid'})
with contextlib.nested(
mock.patch.object(self.volume_api, 'get_snapshot',
return_value=snapshot),
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_get_snap, vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_get_snap.assert_called_once_with(
self.context, 'fake-snapshot-id-1')
vol_create.assert_called_once_with(
self.context, 3, '', '', snapshot, availability_zone=None)
vol_delete.assert_called_once_with(self.context, volume['id'])
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['snapshot'](
self.snapshot_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, do_check_attach=True
).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.image_bdm.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['image'](no_volume_image)
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1, '', '', image_id=image['id'],
availability_zone=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_fail_volume(self):
fail_volume_image = self.image_bdm.copy()
fail_volume_image['volume_id'] = None
test_bdm = self.driver_classes['image'](fail_volume_image)
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': 'fake-uuid'})
with contextlib.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_create.assert_called_once_with(
self.context, 1, '', '', image_id=image['id'],
availability_zone=None)
vol_delete.assert_called_once_with(self.context, volume['id'])
def test_image_attach_volume(self):
test_bdm = self.driver_classes['image'](
self.image_bdm)
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver, do_check_attach=True
).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_blank_attach_fail_volume(self):
no_blank_volume = self.blank_bdm.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['blank'](no_blank_volume)
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': 'fake-uuid'})
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-uuid-blank-vol'}
with contextlib.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size, 'fake-uuid-blank-vol',
'', availability_zone=instance.availability_zone)
vol_delete.assert_called_once_with(
self.context, volume['id'])
def test_blank_attach_volume(self):
no_blank_volume = self.blank_bdm.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['blank'](no_blank_volume)
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': 'fake-uuid'})
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-uuid-blank-vol'}
with contextlib.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(volume_class, 'attach')
) as (vol_create, vol_attach):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size, 'fake-uuid-blank-vol',
'', availability_zone=instance.availability_zone)
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver,
do_check_attach=True)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
[self.volume_bdm, self.ephemeral_bdm])
self.assertEqual(converted, [self.volume_driver_bdm])
def test_convert_all_volumes(self):
converted = driver_block_device.convert_all_volumes()
self.assertEqual([], converted)
converted = driver_block_device.convert_all_volumes(
self.volume_bdm, self.ephemeral_bdm, self.image_bdm,
self.blank_bdm, self.snapshot_bdm)
self.assertEqual(converted, [self.volume_driver_bdm,
self.image_driver_bdm,
self.blank_driver_bdm,
self.snapshot_driver_bdm])
def test_convert_volume(self):
self.assertIsNone(driver_block_device.convert_volume(self.swap_bdm))
self.assertEqual(self.volume_driver_bdm,
driver_block_device.convert_volume(self.volume_bdm))
self.assertEqual(self.snapshot_driver_bdm,
driver_block_device.convert_volume(self.snapshot_bdm))
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['snapshot'](
self.snapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
self.snapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in range(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in range(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertIsNone(driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.snapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.image_bdm.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(local_image))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
test_image = self.driver_classes['image'](self.image_bdm)
test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
test_volume = self.driver_classes['volume'](self.volume_bdm)
test_blank = self.driver_classes['blank'](self.blank_bdm)
for bdm in (test_image, test_snapshot, test_volume, test_blank):
self.assertTrue(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
for bdm in (test_swap, test_ephemeral):
self.assertFalse(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
| apache-2.0 | 6,771,117,812,992,498,000 | 41.381609 | 79 | 0.554649 | false |
ngokevin/zamboni | mkt/downloads/views.py | 1 | 1434 | from django import http
from django.shortcuts import get_object_or_404
import commonware.log
import amo
from amo.utils import HttpResponseSendFile
from mkt.access import acl
from mkt.files.models import File
from mkt.site.decorators import allow_cross_site_request
from mkt.webapps.models import Webapp
log = commonware.log.getLogger('z.downloads')
@allow_cross_site_request
def download_file(request, file_id, type=None):
file = get_object_or_404(File, pk=file_id)
webapp = get_object_or_404(Webapp, pk=file.version.addon_id,
is_packaged=True)
if webapp.is_disabled or file.status == amo.STATUS_DISABLED:
if not acl.check_addon_ownership(request, webapp, viewer=True,
ignore_disabled=True):
raise http.Http404()
# We treat blocked files like public files so users get the update.
if file.status in [amo.STATUS_PUBLIC, amo.STATUS_BLOCKED]:
path = webapp.sign_if_packaged(file.version_id)
else:
# This is someone asking for an unsigned packaged app.
if not acl.check_addon_ownership(request, webapp, dev=True):
raise http.Http404()
path = file.file_path
log.info('Downloading package: %s from %s' % (webapp.id, path))
return HttpResponseSendFile(request, path, content_type='application/zip',
etag=file.hash.split(':')[-1])
| bsd-3-clause | -5,565,101,115,594,177,000 | 33.97561 | 78 | 0.663877 | false |
ghchinoy/tensorflow | tensorflow/python/keras/layers/noise.py | 7 | 6944 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that apply regularization via the addition of noise.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.GaussianNoise')
class GaussianNoise(Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
  Gaussian Noise (GN) is a natural choice as a corruption process
for real valued inputs.
As it is a regularization layer, it is only active at training time.
Arguments:
stddev: Float, standard deviation of the noise distribution.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding noise) or in inference mode (doing nothing).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
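  Example (a minimal usage sketch, not taken from the original docs; the
  model and data below are invented for illustration):

  ```python
  import numpy as np
  import tensorflow as tf

  model = tf.keras.Sequential([
      tf.keras.layers.Dense(64, activation='relu', input_shape=(20,)),
      tf.keras.layers.GaussianNoise(0.1),  # perturbs activations in training
      tf.keras.layers.Dense(1),
  ])
  x = np.ones((2, 20), dtype='float32')
  noisy = model(x, training=True)    # noise is added
  clean = model(x, training=False)   # layer is a no-op at inference
  ```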
"""
def __init__(self, stddev, **kwargs):
super(GaussianNoise, self).__init__(**kwargs)
self.supports_masking = True
self.stddev = stddev
def call(self, inputs, training=None):
def noised():
return inputs + K.random_normal(
shape=array_ops.shape(inputs), mean=0., stddev=self.stddev)
return K.in_train_phase(noised, inputs, training=training)
def get_config(self):
config = {'stddev': self.stddev}
base_config = super(GaussianNoise, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.GaussianDropout')
class GaussianDropout(Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Arguments:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
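  Example (an assumed sketch, not from the original docs): with `rate=0.2`
  the multiplicative noise has standard deviation `sqrt(0.2 / 0.8) = 0.5`.

  ```python
  import tensorflow as tf

  drop = tf.keras.layers.GaussianDropout(0.2)
  x = tf.ones((4, 10))
  y = drop(x, training=True)   # x scaled by 1-centered Gaussian noise
  z = drop(x, training=False)  # returned unchanged at inference time
  ```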
"""
def __init__(self, rate, **kwargs):
super(GaussianDropout, self).__init__(**kwargs)
self.supports_masking = True
self.rate = rate
def call(self, inputs, training=None):
if 0 < self.rate < 1:
def noised():
stddev = np.sqrt(self.rate / (1.0 - self.rate))
return inputs * K.random_normal(
shape=array_ops.shape(inputs), mean=1.0, stddev=stddev)
return K.in_train_phase(noised, inputs, training=training)
return inputs
def get_config(self):
config = {'rate': self.rate}
base_config = super(GaussianDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.AlphaDropout')
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
to their original values, in order to ensure the self-normalizing property
even after this dropout.
Alpha Dropout fits well to Scaled Exponential Linear Units
by randomly setting activations to the negative saturation value.
Arguments:
rate: float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
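  Example (an assumed sketch pairing the layer with `selu` activations as
  suggested above; not part of the original docs):

  ```python
  import tensorflow as tf

  model = tf.keras.Sequential([
      tf.keras.layers.Dense(64, activation='selu',
                            kernel_initializer='lecun_normal',
                            input_shape=(20,)),
      tf.keras.layers.AlphaDropout(0.1),  # keeps self-normalizing statistics
      tf.keras.layers.Dense(1),
  ])
  ```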
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(AlphaDropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
return self.noise_shape if self.noise_shape else array_ops.shape(inputs)
def call(self, inputs, training=None):
if 0. < self.rate < 1.:
noise_shape = self._get_noise_shape(inputs)
def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed): # pylint: disable=missing-docstring
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
kept_idx = math_ops.greater_equal(
K.random_uniform(noise_shape, seed=seed), rate)
kept_idx = math_ops.cast(kept_idx, K.floatx())
# Get affine transformation params
a = ((1 - rate) * (1 + rate * alpha_p**2))**-0.5
b = -a * alpha_p * rate
# Apply mask
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
# Do affine transformation
return a * x + b
return K.in_train_phase(dropped_inputs, inputs, training=training)
return inputs
def get_config(self):
config = {'rate': self.rate}
base_config = super(AlphaDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
| apache-2.0 | 1,387,097,785,866,963,700 | 32.708738 | 109 | 0.691388 | false |
jyeatman/dipy | doc/examples/sfm_reconst.py | 9 | 5785 | """
.. _sfm-reconst:
==============================================
Reconstruction with the Sparse Fascicle Model
==============================================
In this example, we will use the Sparse Fascicle Model [Rokem2014]_ to
reconstruct the fiber orientation distribution function (fODF) in every voxel.
First, we import the modules we will use in this example:
"""
import dipy.reconst.sfm as sfm
import dipy.data as dpd
import dipy.reconst.peaks as dpp
from dipy.viz import fvtk
"""
For the purpose of this example, we will use the Stanford HARDI dataset (150
directions, single b-value of 2000 s/mm$^2$) that can be automatically
downloaded. If you have not yet downloaded this data-set in one of the other
examples, you will need to be connected to the internet the first time you run
this example. The data will be stored for subsequent runs, and for use with
other examples.
"""
from dipy.data import read_stanford_hardi
img, gtab = read_stanford_hardi()
data = img.get_data()
"""
Reconstruction of the fiber ODF in each voxel guides subsequent tracking
steps. Here, the model is the Sparse Fascicle Model, described in
[Rokem2014]_. This model reconstructs the diffusion signal as a combination of
the signals from different fascicles. This model can be written as:
.. math::
y = X\beta
Where $y$ is the signal and $\beta$ are weights on different points in the
sphere. The columns of the design matrix, $X$ are the signals in each point in
the measurement that would be predicted if there was a fascicle oriented in the
direction represented by that column. Typically, the signal used for this
kernel will be a prolate tensor with axial diffusivity 3-5 times higher than
its radial diffusivity. The exact numbers can also be estimated from examining
parts of the brain in which there is known to be only one fascicle (e.g. in
corpus callosum).
Sparsity constraints on the fiber ODF ($\beta$) are set through the Elastic Net
algorithm [Zou2005]_.
Elastic Net optimizes the following cost function:
.. math::
    \sum_{i=1}^{n}{(y_i - \hat{y}_i)^2} + \alpha (\lambda \sum_{j=1}^{m}{w_j}+(1-\lambda) \sum_{j=1}^{m}{w^2_j})
where $\hat{y}$ is the signal predicted for a particular setting of $\beta$,
such that the left part of this expression is the squared loss function;
$\alpha$ is a parameter that sets the balance between the squared loss on
the data, and the regularization constraints. The regularization parameter
$\lambda$ sets the `l1_ratio`, which controls the balance between L1-sparsity
(low sum of weights), and low L2-sparsity (low sum-of-squares of the weights).
Just like constrained spherical deconvolution (see :ref:`reconst-csd`), the SFM
requires the definition of a response function. We'll take advantage of the
automated algorithm in the :mod:`csdeconv` module to find this response
function:
"""
from dipy.reconst.csdeconv import auto_response
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
"""
The ``response`` return value contains two entries. The first is an array with
the eigenvalues of the response function and the second is the average S0 for
this response.
It is good practice to always validate the result of auto_response. For this
purpose, we can print it and have a look at its values.
"""
print(response)
"""
(array([ 0.0014, 0.00029, 0.00029]), 416.206)
We initialize an SFM model object, using these values. We will use the default
sphere (362 vertices, symmetrically distributed on the surface of the sphere),
as a set of putative fascicle directions that are considered in the model
"""
sphere = dpd.get_sphere()
sf_model = sfm.SparseFascicleModel(gtab, sphere=sphere,
l1_ratio=0.5, alpha=0.001,
response=response[0])
"""
For the purpose of the example, we will consider a small volume of data
containing parts of the corpus callosum and of the centrum semiovale
"""
data_small = data[20:50, 55:85, 38:39]
"""
Fitting the model to this small volume of data, we calculate the ODF of this
model on the sphere, and plot it.
"""
sf_fit = sf_model.fit(data_small)
sf_odf = sf_fit.odf(sphere)
fodf_spheres = fvtk.sphere_funcs(sf_odf, sphere, scale=1.3, norm=True)
ren = fvtk.ren()
fvtk.add(ren, fodf_spheres)
print('Saving illustration as sf_odfs.png')
fvtk.record(ren, out_path='sf_odfs.png', size=(1000, 1000))
"""
We can extract the peaks from the ODF, and plot these as well
"""
sf_peaks = dpp.peaks_from_model(sf_model,
data_small,
sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
return_sh=False)
fvtk.clear(ren)
fodf_peaks = fvtk.peaks(sf_peaks.peak_dirs, sf_peaks.peak_values, scale=1.3)
fvtk.add(ren, fodf_peaks)
print('Saving illustration as sf_peaks.png')
fvtk.record(ren, out_path='sf_peaks.png', size=(1000, 1000))
"""
Finally, we plot both the peaks and the ODFs, overlaid:
"""
fodf_spheres.GetProperty().SetOpacity(0.4)
fvtk.add(ren, fodf_spheres)
print('Saving illustration as sf_both.png')
fvtk.record(ren, out_path='sf_both.png', size=(1000, 1000))
"""
.. figure:: sf_both.png
:align: center
**SFM Peaks and ODFs**.
To see how to use this information in tracking, proceed to :ref:`sfm-track`.
References
----------
.. [Rokem2014] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
(2014). Evaluating the accuracy of diffusion MRI models in white
matter. http://arxiv.org/abs/1411.0721
.. [Zou2005] Zou H, Hastie T (2005). Regularization and variable
selection via the elastic net. J R Stat Soc B:301-320
"""
| bsd-3-clause | 176,874,845,774,013,950 | 32.439306 | 111 | 0.700605 | false |
agoraplex/platform | docs/conf.py | 1 | 9043 | # -*- coding: utf-8 -*-
#
# Agoraplex Platform documentation build configuration file, created by
# sphinx-quickstart2 on Fri Jan 11 20:37:25 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import agoraplex.themes.sphinx
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode',
'agoraplex.themes.sphinx.roles',]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Agoraplex Platform'
copyright = u'2013, Tripp Lilley'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'agoraplex'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_url': 'https://github.com/agoraplex/platform',
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = agoraplex.themes.sphinx.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AgoraplexPlatformdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'AgoraplexPlatform.tex', u'Agoraplex Platform Documentation',
u'Tripp Lilley', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'agoraplexplatform', u'Agoraplex Platform Documentation',
[u'Tripp Lilley'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'AgoraplexPlatform', u'Agoraplex Platform Documentation',
u'Tripp Lilley', 'AgoraplexPlatform', 'The Agoraplex Platform.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
intersphinx_mapping = {
# Python SDK docs
'python' : ( 'http://docs.python.org/2/', None ),
}
def map_rtfd_subprojects (url, project, subprojects):
"""
Generic helper to map Intersphinx inventories for several
ReadTheDocs.org sub-projects.
**TODO:** move this into our shared Sphinx helpers
"""
def urlfor (subproject, lang='en', version='latest', project=project):
return rtfd % locals()
return dict((subproject, (urlfor(subproject), None))
for subproject in subprojects)
rtfd = 'http://docs.agoraplex.net/projects/%(subproject)s/%(lang)s/%(version)s'
# Intersphinx the Agoraplex platform docs
project = 'agoraplex'
subprojects = ('anodi', 'predicates',)
intersphinx_mapping.update(map_rtfd_subprojects(rtfd, project, subprojects))
autodoc_default_flags = ['members', 'undoc-members']
add_module_names = False
| bsd-3-clause | -5,002,838,928,667,051,000 | 31.883636 | 126 | 0.704744 | false |
ABcDexter/python-weka-wrapper | python/weka/core/converters.py | 2 | 11100 | # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# converters.py
# Copyright (C) 2014-2015 Fracpete (pythonwekawrapper at gmail dot com)
import javabridge
from weka.core.classes import OptionHandler
from weka.core.capabilities import Capabilities
from weka.core.dataset import Instances, Instance, Attribute
import numpy
class Loader(OptionHandler):
"""
Wrapper class for Loaders.
"""
def __init__(self, classname="weka.core.converters.ArffLoader", jobject=None, options=None):
"""
Initializes the specified loader either using the classname or the JB_Object.
:param classname: the classname of the loader
:type classname: str
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: the list of commandline options to set
:type options: list
"""
if jobject is None:
jobject = Loader.new_instance(classname)
self.enforce_type(jobject, "weka.core.converters.Loader")
super(Loader, self).__init__(jobject=jobject, options=options)
self.incremental = False
self.structure = None
def __iter__(self):
"""
Returns an iterator in case the loader was instantiated in incremental mode, otherwise
an Exception is raised.
:return: the iterator
:rtype: IncrementalLoaderIterator
"""
if not self.incremental:
raise Exception("Not in incremental mode, cannot iterate!")
return IncrementalLoaderIterator(self, self.structure)
def load_file(self, dfile, incremental=False):
"""
Loads the specified file and returns the Instances object.
In case of incremental loading, only the structure.
:param dfile: the file to load
:type dfile: str
:param incremental: whether to load the dataset incrementally
:type incremental: bool
:return: the full dataset or the header (if incremental)
:rtype: Instances
"""
self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter")
self.incremental = incremental
if not javabridge.is_instance_of(dfile, "Ljava/io/File;"):
dfile = javabridge.make_instance(
"Ljava/io/File;", "(Ljava/lang/String;)V", javabridge.get_env().new_string_utf(str(dfile)))
javabridge.call(self.jobject, "reset", "()V")
javabridge.call(self.jobject, "setFile", "(Ljava/io/File;)V", dfile)
if incremental:
self.structure = Instances(javabridge.call(self.jobject, "getStructure", "()Lweka/core/Instances;"))
return self.structure
else:
return Instances(javabridge.call(self.jobject, "getDataSet", "()Lweka/core/Instances;"))
def load_url(self, url, incremental=False):
"""
Loads the specified URL and returns the Instances object.
In case of incremental loading, only the structure.
:param url: the URL to load the data from
:type url: str
:param incremental: whether to load the dataset incrementally
:type incremental: bool
:return: the full dataset or the header (if incremental)
:rtype: Instances
"""
self.enforce_type(self.jobject, "weka.core.converters.URLSourcedLoader")
self.incremental = incremental
javabridge.call(self.jobject, "reset", "()V")
javabridge.call(self.jobject, "setURL", "(Ljava/lang/String;)V", str(url))
if incremental:
self.structure = Instances(javabridge.call(self.jobject, "getStructure", "()Lweka/core/Instances;"))
return self.structure
else:
return Instances(javabridge.call(self.jobject, "getDataSet", "()Lweka/core/Instances;"))
class IncrementalLoaderIterator(object):
"""
    Iterator for dataset rows when loading incrementally.
"""
def __init__(self, loader, structure):
"""
:param loader: the loader instance to use for loading the data incrementally
:type loader: Loader
:param structure: the dataset structure
:type structure: Instances
"""
self.loader = loader
self.structure = structure
def __iter__(self):
"""
Returns itself.
"""
return self
def next(self):
"""
Reads the next dataset row.
:return: the next row
:rtype: Instance
"""
result = javabridge.call(
self.loader.jobject, "getNextInstance",
"(Lweka/core/Instances;)Lweka/core/Instance;", self.structure.jobject)
if result is None:
raise StopIteration()
else:
return Instance(result)
class TextDirectoryLoader(OptionHandler):
"""
Wrapper class for TextDirectoryLoader.
"""
def __init__(self, jobject=None, options=None):
"""
Initializes the text directory loader either using a new instance or the JB_Object.
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: the list of commandline options to set
:type options: list
"""
if jobject is None:
jobject = TextDirectoryLoader.new_instance("weka.core.converters.TextDirectoryLoader")
self.enforce_type(jobject, "weka.core.converters.TextDirectoryLoader")
super(TextDirectoryLoader, self).__init__(jobject=jobject, options=options)
def load(self):
"""
Loads the text files from the specified directory and returns the Instances object.
In case of incremental loading, only the structure.
:return: the full dataset or the header (if incremental)
:rtype: Instances
"""
javabridge.call(self.jobject, "reset", "()V")
return Instances(javabridge.call(self.jobject, "getDataSet", "()Lweka/core/Instances;"))
class Saver(OptionHandler):
"""
Wrapper class for Savers.
"""
def __init__(self, classname="weka.core.converters.ArffSaver", jobject=None, options=None):
"""
Initializes the specified saver either using the classname or the provided JB_Object.
:param classname: the classname of the saver
:type classname: str
:param jobject: the JB_Object to use
:type jobject: JB_Object
:param options: the list of commandline options to use
:type options: list
"""
if jobject is None:
jobject = Saver.new_instance(classname)
self.enforce_type(jobject, "weka.core.converters.Saver")
super(Saver, self).__init__(jobject=jobject, options=options)
def capabilities(self):
"""
Returns the capabilities of the saver.
:return: the capabilities
:rtype: Capabilities
"""
return Capabilities(javabridge.call(self.jobject, "getCapabilities", "()Lweka/core/Capabilities;"))
def save_file(self, data, dfile):
"""
Saves the Instances object in the specified file.
:param data: the data to save
:type data: Instances
:param dfile: the file to save the data to
:type dfile: str
"""
self.enforce_type(self.jobject, "weka.core.converters.FileSourcedConverter")
if not javabridge.is_instance_of(dfile, "Ljava/io/File;"):
dfile = javabridge.make_instance(
"Ljava/io/File;", "(Ljava/lang/String;)V", javabridge.get_env().new_string_utf(str(dfile)))
javabridge.call(self.jobject, "setFile", "(Ljava/io/File;)V", dfile)
javabridge.call(self.jobject, "setInstances", "(Lweka/core/Instances;)V", data.jobject)
javabridge.call(self.jobject, "writeBatch", "()V")
def loader_for_file(filename):
"""
Returns a Loader that can load the specified file, based on the file extension. None if failed to determine.
:param filename: the filename to get the loader for
:type filename: str
    :return: the associated loader instance or None if none found
:rtype: Loader
"""
loader = javabridge.static_call(
"weka/core/converters/ConverterUtils", "getLoaderForFile",
"(Ljava/lang/String;)Lweka/core/converters/AbstractFileLoader;", filename)
if loader is None:
return None
else:
return Loader(jobject=loader)
def saver_for_file(filename):
"""
    Returns a Saver that can save to the specified file, based on the file extension. None if failed to determine.
:param filename: the filename to get the saver for
:type filename: str
:return: the associated saver instance or None if none found
:rtype: Saver
"""
saver = javabridge.static_call(
"weka/core/converters/ConverterUtils", "getSaverForFile",
"(Ljava/lang/String;)Lweka/core/converters/AbstractFileSaver;", filename)
if saver is None:
return None
else:
return Saver(jobject=saver)
def ndarray_to_instances(array, relation, att_template="Att-#", att_list=None):
"""
Converts the numpy matrix into an Instances object and returns it.
:param array: the numpy ndarray to convert
:type array: numpy.darray
:param relation: the name of the dataset
:type relation: str
:param att_template: the prefix to use for the attribute names, "#" is the 1-based index,
"!" is the 0-based index, "@" the relation name
:type att_template: str
:param att_list: the list of attribute names to use
:type att_list: list
:return: the generated instances object
:rtype: Instances
"""
if len(numpy.shape(array)) != 2:
raise Exception("Number of array dimensions must be 2!")
rows, cols = numpy.shape(array)
# header
atts = []
if att_list is not None:
if len(att_list) != cols:
raise Exception(
"Number columns and provided attribute names differ: " + str(cols) + " != " + len(att_list))
for name in att_list:
att = Attribute.create_numeric(name)
atts.append(att)
else:
for i in xrange(cols):
name = att_template.replace("#", str(i+1)).replace("!", str(i)).replace("@", relation)
att = Attribute.create_numeric(name)
atts.append(att)
result = Instances.create_instances(relation, atts, rows)
# data
for i in xrange(rows):
inst = Instance.create_instance(array[i])
result.add_instance(inst)
return result
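# A minimal usage sketch (assumptions: weka.core.jvm is available to start the
# JVM, and the file paths below are placeholders for real datasets).
if __name__ == "__main__":
    import weka.core.jvm as jvm
    jvm.start()
    try:
        # load an ARFF file, then write the same dataset back out as CSV
        loader = Loader(classname="weka.core.converters.ArffLoader")
        dataset = loader.load_file("/tmp/example.arff")
        saver = Saver(classname="weka.core.converters.CSVSaver")
        saver.save_file(dataset, "/tmp/example.csv")
    finally:
        jvm.stop()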
| gpl-3.0 | -3,305,952,681,374,735,000 | 37.811189 | 112 | 0.642252 | false |
dawran6/zulip | contrib_bots/bots/googlesearch/googlesearch.py | 8 | 2823 | # See zulip/contrib_bots/lib/readme.md for instructions on running this code.
from __future__ import print_function
import logging
import http.client
from six.moves.urllib.request import urlopen
# Uses the Google search engine bindings
# pip install --upgrade google
from google import search
def get_google_result(search_keywords):
if search_keywords == 'help':
help_message = "To use this bot start message with @google \
followed by what you want to search for. If \
found, Zulip will return the first search result \
on Google. An example message that could be sent is:\
'@google zulip' or '@google how to create a chatbot'."
return help_message
else:
try:
urls = search(search_keywords, stop=20)
urlopen('http://216.58.192.142', timeout=1)
except http.client.RemoteDisconnected as er:
logging.exception(er)
return 'Error: No internet connection. {}.'.format(er)
except Exception as e:
logging.exception(e)
return 'Error: Search failed. {}.'.format(e)
if not urls:
return 'No URLs returned by google.'
url = next(urls)
return 'Success: {}'.format(url)
class GoogleSearchHandler(object):
'''
This plugin allows users to enter a search
term in Zulip and get the top URL sent back
to the context (stream or private) in which
it was called. It looks for messages starting
with @google.
'''
def usage(self):
return '''
This plugin will allow users to search
for a given search term on Google from
Zulip. Use '@google help' to get more
information on the bot usage. Users
should preface messages with @google.
'''
def handle_message(self, message, client, state_handler):
original_content = message['content']
original_sender = message['sender_email']
result = get_google_result(original_content)
if message['type'] == 'private':
client.send_message(dict(
type='private',
to=original_sender,
content=result,
))
else:
client.send_message(dict(
type=message['type'],
to=message['display_recipient'],
subject=message['subject'],
content=result,
))
handler_class = GoogleSearchHandler
def test():
try:
urlopen('http://216.58.192.142', timeout=1)
print('Success')
return True
except http.client.RemoteDisconnected as e:
print('Error: {}'.format(e))
return False
if __name__ == '__main__':
test()
| apache-2.0 | -3,737,393,330,933,600,000 | 31.825581 | 78 | 0.581651 | false |
kaifabian/lana-dashboard | lana_dashboard/lana_data/models/autonomous_system.py | 1 | 1459 | from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from lana_dashboard.lana_data.models.institution import Institution
class AutonomousSystem(models.Model):
as_number = models.BigIntegerField(unique=True, verbose_name=_("AS Number"))
comment = models.CharField(max_length=255, blank=True, null=True, verbose_name=_("Comment"))
location_lat = models.FloatField(blank=True, null=True, verbose_name=_("Latitude"))
location_lng = models.FloatField(blank=True, null=True, verbose_name=_("Longitude"))
private = models.BooleanField(default=False, verbose_name=_("Private"))
institution = models.ForeignKey(Institution, related_name='autonomous_systems', verbose_name=_("Institution"))
class Meta:
ordering = ['as_number']
verbose_name = ungettext_lazy("Autonomous System", "Autonomous Systems", 1)
verbose_name_plural = ungettext_lazy("Autonomous System", "Autonomous Systems", 2)
@classmethod
def get_view_qs(cls, user):
return cls.objects.filter(Q(private=False) | Q(institution__owners=user)).distinct('as_number')
@property
def has_geo(self):
return self.location_lat is not None and self.location_lng is not None
def __str__(self):
return "AS{}".format(self.as_number)
def can_view(self, user):
return not self.private or self.institution.owners.filter(id=user.id).exists()
def can_edit(self, user):
return self.institution.can_edit(user)
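# A minimal query sketch (illustrative assumption, not part of the model;
# requires a configured Django environment and a request.user):
#
#   visible = AutonomousSystem.get_view_qs(request.user)
#   mappable = [asys for asys in visible if asys.has_geo]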
| agpl-3.0 | -8,018,120,222,579,425,000 | 38.432432 | 111 | 0.747772 | false |
sgiavasis/C-PAC | CPAC/GUI/interface/windows/config_window.py | 1 | 38458 | import wx
from CPAC.GUI.interface.utils.constants import substitution_map
import pkg_resources as p
from CPAC.GUI.interface.pages import WorkflowConfig, Motion, AnatomicalPreprocessing, \
DerivativesConfig, Segmentation, Registration, FunctionalPreProcessing,\
MotionOptions, Scrubbing, AnatToFuncRegistration, FuncToMNIRegistration,\
VMHC, VMHCSettings, ReHo, ReHoSettings, \
SCA, SCASettings, MultipleRegressionSCA,\
Settings, ComputerSettings, DirectorySettings, \
Nuisance, NuisanceCorrection, MedianAngleCorrection,\
CentralitySettings, Centrality,\
ALFF, ALFFSettings,\
Smoothing, SmoothingSettings,\
Filtering, FilteringSettings,\
TimeSeries, ROITimeseries, VOXELTimeseries, \
SpatialRegression, GenerateSeeds, VerticesTimeSeries,\
GroupAnalysis, GPASettings, BASCSettings,\
BASC, CWAS, CWASSettings,\
DualRegression, DualRegressionOptions, TimeSeriesOptions
ID_SUBMIT = 6
class Mybook(wx.Treebook):
def __init__(self, parent):
wx.Treebook.__init__(self, parent, wx.ID_ANY, style=
wx.BK_DEFAULT)
self.page_list = []
# create the page windows as children of the notebook
page1 = Settings(self)
page2 = ComputerSettings(self)
page3 = DirectorySettings(self)
page4 = WorkflowConfig(self)
page47 = DerivativesConfig(self)
page5 = AnatomicalPreprocessing(self)
page6 = Registration(self, 1)
page7 = Segmentation(self, 2)
page8 = FunctionalPreProcessing(self)
page9 = TimeSeriesOptions(self)
page10 = AnatToFuncRegistration(self, 5)
page11 = FuncToMNIRegistration(self, 6)
page12 = Nuisance(self)
page13 = NuisanceCorrection(self, 7)
page14 = MedianAngleCorrection(self, 8)
page15 = Filtering(self)
page16 = FilteringSettings(self, 9)
page17 = Motion(self)
page18 = MotionOptions(self)
page19 = Scrubbing(self, 4)
page20 = TimeSeries(self)
page21 = GenerateSeeds(self)
page22 = ROITimeseries(self)
page23 = VOXELTimeseries(self)
page24 = VerticesTimeSeries(self)
page25 = SpatialRegression(self)
page26 = SCA(self)
page27 = SCASettings(self)
page28 = MultipleRegressionSCA(self)
page29 = DualRegression(self)
page30 = DualRegressionOptions(self)
page31 = VMHC(self)
page32 = VMHCSettings(self)
page33 = ALFF(self)
page34 = ALFFSettings(self)
page35 = ReHo(self)
page36 = ReHoSettings(self)
page37 = Centrality(self)
page38 = CentralitySettings(self)
page39 = Smoothing(self)
page40 = SmoothingSettings(self)
page41 = BASC(self)
page42 = BASCSettings(self)
page43 = CWAS(self)
page44 = CWASSettings(self)
page45 = GroupAnalysis(self)
page46 = GPASettings(self)
# add the pages to the notebook with the label to show on the tab
self.AddPage(page1, "Environment Setup", wx.ID_ANY)
self.AddSubPage(page2, "Computer Settings", wx.ID_ANY)
self.AddSubPage(page3, "Output Settings", wx.ID_ANY)
self.AddSubPage(page4, "Preprocessing Workflow Options", wx.ID_ANY)
self.AddSubPage(page47, "Derivatives Settings", wx.ID_ANY)
self.AddPage(page5, "Anatomical Preprocessing", wx.ID_ANY)
self.AddSubPage(page6, "Anatomical Registration", wx.ID_ANY)
self.AddSubPage(page7, "Tissue Segmentation", wx.ID_ANY)
self.AddPage(page8, "Functional Preprocessing", wx.ID_ANY)
self.AddSubPage(page9, "Time Series Options", wx.ID_ANY)
self.AddSubPage(page10, "Functional to Anatomical Registration", wx.ID_ANY)
self.AddSubPage(page11, "Functional to MNI Registration", wx.ID_ANY)
self.AddPage(page12, "Nuisance", wx.ID_ANY)
self.AddSubPage(page13, "Nuisance Correction", wx.ID_ANY)
self.AddSubPage(page14, "Median Angle Correction", wx.ID_ANY)
self.AddPage(page15, "Temporal Filtering", wx.ID_ANY)
self.AddSubPage(page16, "Temporal Filtering Options", wx.ID_ANY)
self.AddPage(page17, "Motion Correction", wx.ID_ANY)
self.AddSubPage(page18, "Motion Correction Options", wx.ID_ANY)
self.AddSubPage(page19, "Scrubbing Options", wx.ID_ANY)
self.AddPage(page20, "Time Series Extraction (TSE)", wx.ID_ANY)
self.AddSubPage(page21, "Define New Seeds", wx.ID_ANY)
self.AddSubPage(page22, "ROI Average TSE", wx.ID_ANY)
self.AddSubPage(page23, "ROI Voxelwise TSE", wx.ID_ANY)
self.AddSubPage(page24, "Surface Vertices TSE", wx.ID_ANY)
self.AddSubPage(page25, "Spatial Regression", wx.ID_ANY)
self.AddPage(page26, "Seed-based Correlation Analysis (SCA)", wx.ID_ANY)
self.AddSubPage(page27, "SCA Options", wx.ID_ANY)
self.AddSubPage(page28, "Mutiple Regression SCA Options", wx.ID_ANY)
self.AddPage(page29, "Dual Regression", wx.ID_ANY)
self.AddSubPage(page30, "Dual Regression Options", wx.ID_ANY)
self.AddPage(page31, "Voxel-mirrored Homotopic Connectivity", wx.ID_ANY)
self.AddSubPage(page32, "VMHC Settings", wx.ID_ANY)
self.AddPage(page33, "ALFF and f/ALFF", wx.ID_ANY)
self.AddSubPage(page34, "ALFF and f/ALFF Options", wx.ID_ANY)
self.AddPage(page35, "Regional Homogeneity (ReHo)", wx.ID_ANY)
self.AddSubPage(page36, "ReHo Options", wx.ID_ANY)
self.AddPage(page37, "Network Centrality", wx.ID_ANY)
self.AddSubPage(page38, "Network Centrality Options", wx.ID_ANY)
self.AddPage(page39, "Spatial Smoothing", wx.ID_ANY)
self.AddSubPage(page40, "Spatial Smoothing Options", wx.ID_ANY)
self.AddPage(page41, "Bootstrap Analysis of Stable Clusters", wx.ID_ANY)
self.AddSubPage(page42, "BASC Settings", wx.ID_ANY)
self.AddPage(page43, "CWAS", wx.ID_ANY)
self.AddSubPage(page44, "CWAS Settings", wx.ID_ANY)
self.AddPage(page45, "Group Analysis", wx.ID_ANY)
self.AddSubPage(page46, "Group Analysis Settings", wx.ID_ANY)
self.Bind(wx.EVT_TREEBOOK_PAGE_CHANGED, self.OnPageChanged)
self.Bind(wx.EVT_TREEBOOK_PAGE_CHANGING, self.OnPageChanging)
# This is a workaround for a sizing bug on Mac...
wx.FutureCall(100, self.AdjustSize)
self.SetSelection(1)
self.Refresh()
def OnPageChanged(self, event):
old = event.GetOldSelection()
new = event.GetSelection()
sel = self.GetSelection()
event.Skip()
def OnPageChanging(self, event):
old = event.GetOldSelection()
new = event.GetSelection()
sel = self.GetSelection()
event.Skip()
def AdjustSize(self):
self.GetTreeCtrl().InvalidateBestSize()
self.SendSizeEvent()
def get_page_list(self):
return self.page_list
def get_page(self, index):
return self.page_list[index]
class MainFrame(wx.Frame):
def __init__(self, parent, option='save', path="", pipeline_id=""):
wx.Frame.__init__(
self, parent=parent, title="CPAC Pipeline Configuration", size=(1200, 520))
# Here we create a panel and a notebook on the panel
self.p = wx.Panel(self)
self.nb = Mybook(self.p)
self.path = path
self.pipeline_id = pipeline_id
self.option = option
self.parent = parent
btnPanel = wx.Panel(self.p, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
submit = wx.Button(btnPanel, wx.ID_SAVE, "Save", (
280, 10), wx.DefaultSize, 0)
hbox.Add(submit, 0.6, wx.ALIGN_RIGHT | wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.submit_item, id=wx.ID_SAVE)
testConfig = wx.Button(btnPanel, wx.ID_PREVIEW, "Test Configuration", (
350, 10), wx.DefaultSize, 0)
hbox.Add(testConfig, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
self.Bind(wx.EVT_BUTTON, self.testConfig, id=wx.ID_PREVIEW)
cancel = wx.Button(btnPanel, wx.ID_CANCEL, "Cancel", (
220, 10), wx.DefaultSize, 0)
self.Bind(wx.EVT_BUTTON, self.cancel, id=wx.ID_CANCEL)
hbox.Add(cancel, 0, wx.ALIGN_RIGHT | wx.ALL, 5)
btnPanel.SetSizer(hbox)
# finally, put the notebook in a sizer for the panel to manage
# the layout
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.nb, 1, wx.EXPAND)
sizer.Add(btnPanel, 0.6, wx.EXPAND | wx.RIGHT, 20)
self.p.SetSizer(sizer)
self.Layout()
self.Show()
if option == 'edit' or option == 'load':
self.load()
def load(self):
import yaml
try:
config_file_map = yaml.load(open(self.path, 'r'))
except:
raise Exception("Error importing file - %s , Make"
" sure it is in correct yaml format")
#for config in config_file_map:
# print "\n\n config: ", config, " selection: ", config_file_map[config]
for page in self.nb.get_page_list():
ctrl_list = page.page.get_ctrl_list()
for ctrl in ctrl_list:
name = ctrl.get_name()
val = config_file_map.get(str(name))
#print "loading ctrl ->", name, "->", val
sample_list = ctrl.get_values()
#print "sample_list -->", sample_list
s_map = dict((v, k)
for k, v in substitution_map.iteritems())
if val:
if isinstance(val, list):
if ctrl.get_datatype() == 8:
value = []
for item in val:
data = ""
for k, v in item.iteritems():
if v == 1 and k in sample_list:
if data:
data = data + "," + k
else:
data = k
value.append(data)
elif ctrl.get_datatype() == 6:
value = []
for v in val:
value.append(str(v))
elif ctrl.get_datatype() == 3:
value = [sample_list[i]
for i, x in enumerate(val) if x == True]
elif ctrl.get_datatype() == 4:
if 1 in val and 0 in val:
val = [10]
if 'ANTS' in val and 'FSL' in val:
val = [11]
if '3dAutoMask' in val and 'BET' in val:
val = [12]
value = [s_map.get(item)
for item in val if s_map.get(item) != None]
if not value:
value = [ str(item) for item in val]
elif ctrl.get_datatype() == 5 and ctrl.get_type() == 6:
value = [sample_list[v] for v in val]
else:
value = None
for v in val:
if value:
value = value + "," + str(v)
else:
value = str(v)
else:
if ctrl.get_datatype() == 2 and ctrl.get_type() == 0 and\
str(val) not in sample_list:
value = sample_list[val]
else:
value = str(val)
else:
value = ""
#print "setting value in ctrl -->", value
#print "type -->", type(value)
ctrl.set_value(value)
def testConfig(self, event):
'''
This function runs when the user clicks the "Test Configuration"
button in the pipeline configuration window.
It prompts the user for a sample subject list (i.e. one that they will
be using with the config they are building). Then it builds the
        pipeline but does not run it. It then reports whether the
        configuration will run, based on whether the pipeline gets built
        successfully.
'''
import os
import yaml
from CPAC.utils import Configuration
from CPAC.pipeline.cpac_pipeline import prep_workflow
from CPAC.pipeline.cpac_runner import build_strategies
def display(win, msg, changeBg=True):
wx.MessageBox(msg, "Error")
if changeBg:
win.SetBackgroundColour("pink")
win.SetFocus()
win.Refresh()
# Collect a sample subject list and parse it in
testDlg0 = wx.MessageDialog(
self, 'This tool will run a quick check on the current pipeline configuration.' \
' Click OK to provide a subject list you will be using with this setup.',
'Subject List',
wx.OK | wx.ICON_INFORMATION)
testDlg0.ShowModal()
testDlg0.Destroy()
dlg = wx.FileDialog(
self, message="Choose the CPAC Subject list file",
defaultDir=os.getcwd(),
defaultFile="CPAC_subject_list.yml",
wildcard="YAML files(*.yaml, *.yml)|*.yaml;*.yml",
style=wx.OPEN | wx.CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
subListPath = dlg.GetPath()
sublist = yaml.load(open(os.path.realpath(subListPath), 'r'))
# Check to ensure the user is providing an actual subject
# list and not some other kind of file
try:
subInfo = sublist[0]
except:
errDlg4 = wx.MessageDialog(
self, 'ERROR: Subject list file not in proper format - check if you' \
' loaded the correct file? \n\n' \
'Error name: config_window_0001',
'Subject List Error',
wx.OK | wx.ICON_ERROR)
errDlg4.ShowModal()
errDlg4.Destroy()
raise Exception
# Another check to ensure the actual subject list was generated
# properly and that it will work
if 'subject_id' not in subInfo:
errDlg3 = wx.MessageDialog(
self, 'ERROR: Subject list file not in proper format - check if you' \
' loaded the correct file? \n\n' \
'Error name: config_window_0002',
'Subject List Error',
wx.OK | wx.ICON_ERROR)
errDlg3.ShowModal()
errDlg3.Destroy()
raise Exception
# Following code reads in the parameters and selections from the
# pipeline configuration window and populate the config_list
config_list = []
wf_counter = []
#print "self.nb.get_page_list()", self.nb.get_page_list()
for page in self.nb.get_page_list():
#print "page ----> ", page
switch = page.page.get_switch()
#print "switch ---->", switch
ctrl_list = page.page.get_ctrl_list()
validate = False
if switch:
switch_val = str(switch.get_selection()).lower()
#print "switch_val ---->", switch_val
if switch_val == 'on' or switch_val == 'true' or switch_val == '1':
validate = True
wf_counter.append(page.get_counter())
for ctrl in ctrl_list:
#validating
if (switch == None or validate) and ctrl.get_validation():
win = ctrl.get_ctrl()
#print "validating ctrl-->", ctrl.get_name()
#print "ctrl.get_selection()", ctrl.get_selection()
#print "type(ctrl.get_selection())", type(ctrl.get_selection())
if isinstance(ctrl.get_selection(), list):
value = ctrl.get_selection()
if not value:
display(
win, "%s field is empty or the items are not checked!" % ctrl.get_name(), False)
return
else:
value = str(ctrl.get_selection())
if len(value) == 0:
display(win, "%s field is empty!" % ctrl.get_name())
return
if '/' in value and '$' not in value and not isinstance(value, list):
if not os.path.exists(ctrl.get_selection()) and value != 'On/Off':
display(
win, "%s field contains incorrect path. Please update the path!" % ctrl.get_name())
return
config_list.append(ctrl)
# Get the user's CPAC output directory for use in this script
for config in config_list:
#print config.get_name(), " ", config.get_selection()
if config.get_name() == 'outputDirectory':
outDir = config.get_selection()
# Write out a pipeline_config file, read it in and then delete it
# (Will revise the data structure of the config files later so this
# can just pass the data structure instead of doing it this way)
try:
self.write(outDir + 'testConfig.yml', config_list)
c = Configuration(yaml.load(open(os.path.realpath(outDir + 'testConfig.yml'), 'r')))
os.remove(outDir + 'testConfig.yml')
except:
errDlg2 = wx.MessageDialog(
self, 'A problem occurred with preparing the pipeline test run. \n\n' \
                'Please ensure you have write access to the directories you' \
' have chosen for the CPAC working, crash, and output folders.',
'Test Configuration Error',
wx.OK | wx.ICON_ERROR)
errDlg2.ShowModal()
errDlg2.Destroy()
if (1 in c.runNuisance) or (c.Corrections != None):
strategies = sorted(build_strategies(c))
else:
strategies = None
# Run the actual pipeline building prep and see if it works or not
testDlg1 = wx.MessageDialog(
self, 'Click OK to run the test. This should take only a few seconds.',
'Running Test',
wx.OK | wx.ICON_INFORMATION)
testDlg1.ShowModal()
# Check file paths first
# Just getting proper names of config file parameters
try:
params_file = open(p.resource_filename('CPAC', 'GUI/resources/config_parameters.txt'), "r")
except:
print "Error: Could not open configuration parameter file.", "\n"
raise Exception
paramInfo = params_file.read().split('\n')
paramList = []
for param in paramInfo:
if param != '':
paramList.append(param.split(','))
# function for file path checking
def testFile(filepath, paramName):
try:
if filepath != None:
fileTest = open(filepath)
fileTest.close()
except:
testDlg1.Destroy()
for param in paramList:
if param[0] == paramName:
paramTitle = param[1]
paramGroup = param[2]
break
errDlgFileTest = wx.MessageDialog(
self, 'Error reading file - either it does not exist or you' \
' do not have read access. \n\n' \
'Parameter: %s \n' \
'In tab: %s \n\n' \
'Path: %s' % (paramTitle, paramGroup, filepath),
'Pipeline Not Ready',
wx.OK | wx.ICON_ERROR)
errDlgFileTest.ShowModal()
errDlgFileTest.Destroy()
testFile(c.standardResolutionBrainAnat,'standardResolutionBrainAnat')
testFile(c.standardAnat,'standardAnat')
testFile(c.PRIOR_WHITE,'PRIOR_WHITE')
testFile(c.PRIOR_GRAY,'PRIOR_GRAY')
testFile(c.PRIOR_CSF,'PRIOR_CSF')
testFile(c.standardResolutionBrain,'standardResolutionBrain')
testFile(c.standard,'standard')
testFile(c.identityMatrix,'identityMatrix')
testFile(c.boundaryBasedRegistrationSchedule,'boundaryBasedRegistrationSchedule')
testFile(c.harvardOxfordMask,'harvardOxfordMask')
testFile(c.seedSpecificationFile,'seedSpecificationFile')
testFile(c.roiSpecificationFile,'roiSpecificationFile')
testFile(c.roiSpecificationFileForSCA,'roiSpecificationFileForSCA')
testFile(c.maskSpecificationFile,'maskSpecificationFile')
testFile(c.maskSpecificationFileForSCA,'maskSpecificationFileForSCA')
testFile(c.spatialPatternMaps,'spatialPatternMaps')
testFile(c.brainSymmetric,'brainSymmetric')
testFile(c.symmStandard,'symmStandard')
testFile(c.twommBrainMaskDiluted,'twommBrainMaskDiluted')
testFile(c.configFileTwomm,'configFileTwomm')
testFile(c.templateSpecificationFile,'templateSpecificationFile')
testFile(c.bascAffinityThresholdFile,'bascAffinityThresholdFile')
testFile(c.cwasROIFile,'cwasROIFile')
testFile(c.cwasRegressorFile,'cwasRegressorFile')
try:
# Run the pipeline building
prep_workflow(sublist[0], c, strategies, 0)
except:
testDlg1.Destroy()
errDlg1 = wx.MessageDialog(
self, 'There are issues with the current configuration which need to be' \
' resolved - please check to make sure the options you are running' \
' have the proper pre-requisites selected.',
'Pipeline Not Ready',
wx.OK | wx.ICON_ERROR)
errDlg1.ShowModal()
errDlg1.Destroy()
else:
testDlg1.Destroy()
okDlg1 = wx.MessageDialog(
self, 'The current configuration will run successfully. You can safely' \
' save and run this setup!',
'Pipeline Ready',
wx.OK | wx.ICON_INFORMATION)
okDlg1.ShowModal()
okDlg1.Destroy()
def submit_item(self, event):
import os
import linecache
def display(win, msg, changeBg=True):
wx.MessageBox(msg, "Error")
if changeBg:
win.SetBackgroundColour("pink")
win.SetFocus()
win.Refresh()
config_list = []
hash_val = 0
wf_counter = []
#print "self.nb.get_page_list()", self.nb.get_page_list()
for page in self.nb.get_page_list():
#print "page ----> ", page
switch = page.page.get_switch()
#print "switch ---->", switch
ctrl_list = page.page.get_ctrl_list()
validate = False
if switch:
switch_val = str(switch.get_selection()).lower()
#print "switch_val ---->", switch_val
if switch_val == 'on' or switch_val == 'true' or switch_val == '1':
validate = True
wf_counter.append(page.get_counter())
for ctrl in ctrl_list:
#validating
if (switch == None or validate) and ctrl.get_validation():
win = ctrl.get_ctrl()
#print "validating ctrl-->", ctrl.get_name()
#print "ctrl.get_selection()", ctrl.get_selection()
#print "type(ctrl.get_selection())", type(ctrl.get_selection())
if isinstance(ctrl.get_selection(), list):
value = ctrl.get_selection()
if not value:
display(
win, "%s field is empty or the items are not checked!" % ctrl.get_name(), False)
return
else:
value = str(ctrl.get_selection())
if len(value) == 0:
display(win, "%s field is empty!" % ctrl.get_name())
return
if '/' in value and '$' not in value and not isinstance(value, list):
if not os.path.exists(ctrl.get_selection()) and value != 'On/Off':
display(
win, "%s field contains incorrect path. Please update the path!" % ctrl.get_name())
return
config_list.append(ctrl)
# Get the user's CPAC pipeline name for use in this script
for config in config_list:
if config.get_name() == 'pipelineName':
pipeline_name = config.get_selection()
if len(pipeline_name) == 0:
noNameDlg = wx.MessageDialog(
self, 'Please enter a pipeline name.',
'Error!',
wx.OK | wx.ICON_ERROR)
noNameDlg.ShowModal()
noNameDlg.Destroy()
return
dlg = wx.FileDialog(
self, message="Save CPAC configuration file as ...", defaultDir=os.getcwd(),
defaultFile=("pipeline_config_%s" % pipeline_name), wildcard="YAML files(*.yaml, *.yml)|*.yaml;*.yml", style=wx.SAVE)
dlg.SetFilterIndex(2)
if dlg.ShowModal() == wx.ID_OK:
self.path = dlg.GetPath()
# Strips any user-input file extension and enforces .yml as
# the extension
self.path = os.path.splitext(self.path)[0] + '.yml'
self.write(self.path, config_list)
dlg.Destroy()
if self.option != 'edit':
# this runs if you hit 'Save' from within the pipeline config
# editor AND the editor was opened from the main window by
# clicking 'New' instead of 'Edit'
### this is the old code for generating random city names
### to name pipeline configs. remove at some point?
#for counter in wf_counter:
# if counter != 0:
# hash_val += 2 ** counter
#print "wf_counter -- ", wf_counter
#print "hashval --> ", hash_val
#pipeline_id = linecache.getline(p.resource_filename('CPAC', \
# 'GUI/resources/pipeline_names.py'), hash_val)
print "pipeline_id ==", pipeline_name
if os.path.exists(self.path):
self.update_listbox(pipeline_name)
else:
# this runs if you hit 'Save' from within the pipeline config
# editor AND the editor was opened from the main window by
# clicking 'Edit' instead of 'New'
pipeline_map = self.parent.get_pipeline_map()
if pipeline_map.get(pipeline_name) != None:
# this runs if you hit Edit, change your pipeline config
# file BUT keep the Pipeline Name the same and save it
pipeline_map[pipeline_name] = self.path
else:
# this runs if you hit Edit, change your pipeline config
# AND also change the Pipeline Name and save it with the
# new path - this adds the new pipeline to the listbox on
# the main CPAC window
pipeline_map[pipeline_name] = self.path
self.Parent.listbox.Append(pipeline_name)
self.SetFocus()
self.Close()
def cancel(self, event):
self.Close()
def update_listbox(self, value):
if len(value) > 0:
self.pipeline_id = value
pipeline_map = self.parent.get_pipeline_map()
if pipeline_map.get(self.pipeline_id) == None:
pipeline_map[self.pipeline_id] = self.path
self.Parent.listbox.Append(self.pipeline_id)
else:
dlg2 = wx.MessageDialog(
self, 'Pipeline already exists. Please enter a new name',
'Error!',
wx.OK | wx.ICON_ERROR)
dlg2.ShowModal()
dlg2.Destroy()
'''
def update_listbox(self, value):
while True:
dlg = wx.TextEntryDialog(
self, 'Please enter a unique pipeline id for the configuration',
'Pipeline Id', value.strip())
dlg.SetValue(str(value.strip()))
dlg.Restore()
if dlg.ShowModal() == wx.ID_OK:
if len(dlg.GetValue()) > 0:
self.pipeline_id = dlg.GetValue()
pipeline_map = self.parent.get_pipeline_map()
if pipeline_map.get(self.pipeline_id) == None:
pipeline_map[self.pipeline_id] = self.path
self.Parent.listbox.Append(self.pipeline_id)
dlg.Destroy()
break
else:
dlg2 = wx.MessageDialog(
self, 'Pipeline already exist. Please enter a new name',
'Error!',
wx.OK | wx.ICON_ERROR)
dlg2.ShowModal()
dlg2.Destroy()
'''
def write(self, path, config_list):
import ast
try:
f = open(path, 'w')
for item in config_list:
label = item.get_name()
value = item.get_selection()
dtype = item.get_datatype()
type = item.get_type()
'''
print "LABEL: ", label
print "VALUE: ", value
print "DTYPE: ", dtype
print "TYPE: ", type
print ""
'''
sample_list = item.get_values()
comment = item.get_help()
#print "*****label : type : value -->", label, " : ", dtype, " : ", value
for line in comment.split("\n"):
if line:
print>>f, "#", line
# prints setting names and values (ex. " runAnatomicalProcessing: [1] ") into the
# pipeline_config file, using a different set of code depending on the data type
# parameters that are strings (ex. " False " or a path)
if dtype == 0 or dtype == 1:
print >>f, label, ": ", str(value)
print >>f,"\n"
# parameters that are integers
elif dtype == 2:
if type == 0:
value = sample_list.index(value)
else:
if substitution_map.get(value) != None:
value = substitution_map.get(value)
elif value != 'None':
value = ast.literal_eval(str(value))
print >>f, label, ": ", value
print >>f,"\n"
# parameters that are lists (ex. " [False, False] ")
elif dtype == 3:
map = ast.literal_eval(str(value))
values = []
for x in range(0, len(map.keys())):
values.append(False)
for k, v in map.iteritems():
item, idx = k
values[idx] = v
print>>f, label, ": ", values
print>>f,"\n"
# parameters that are switches (ex. [0] or [1] )
elif dtype == 4:
values=[]
if isinstance(value, list):
value = ast.literal_eval(str(value))
else:
value = str(value).split(",")
for val in value:
val = val.strip()
sval = substitution_map.get(val)
if sval != None:
values.append(sval)
else:
values.append(val)
if values == [10]:
values = [1,0]
elif values == [11]:
values = ['ANTS','FSL']
elif values == [12]:
values = ['3dAutoMask','BET']
print>>f, label, ": ", values
print>>f,"\n"
# parameters that are bracketed numbers (int or float)
elif dtype == 5:
'''
print "1: ", ast.literal_eval(value)
print "2: ", ast.literal_eval(str(value))
print "3: ", value
print "4: ", str(value)
print "5: ", [value]
print "6: ", list(value)
print "7: ", [sample_list.index(val) for val in value]
'''
'''
if isinstance(value, list):
value = ast.literal_eval(str(value))
else:
value = str(value)
'''
'''
if isinstance(value, tuple):
value = list(value)
elif isinstance(value, list):
value = [sample_list.index(val) for val in value]
else:
value = [value]
'''
### parse user input ### can't use internal function type() here???
if value.find(',') != -1:
lvalue = value.split(',')
elif value.find(';') != -1:
lvalue = value.split(';')
elif value.find(':') != -1:
lvalue = value.split(':')
else:
lvalue = [value]
#print 'split value: ', lvalue
if value.find('.') != -1:
lvalue = [float(item) for item in lvalue]
elif len(value) > 0:
lvalue = [int(item) for item in lvalue]
else:
lvalue = 0
#print 'final value: ', lvalue
"""
if len(value) > 1:
value = float(value)
elif len(value) == 1:
value = int(value)
else:
value = 0
valueList = []
valueList.append(value)
"""
print>>f, label, ":", lvalue ###
print>>f, "\n"
# parameters that are ? (bandpass filter specs)
elif dtype == 6:
values = []
for val in ast.literal_eval(str(value)):
values.append(ast.literal_eval(val))
print>>f, label, ":", values
print>>f, "\n"
# parameters that are whole words
elif dtype == 8:
print>>f, label,":"
value = ast.literal_eval(str(value))
for val in value:
val = val.split(',')
f.write(" - ")
flag = 0
for sample in sample_list:
if flag == 0:
space = ""
flag = 1
else:
space = " "
if sample in val:
print>>f, space, sample, ": ", 1
else:
print>>f, space, sample, ": ", 0
print >>f, "\n"
else:
value = ast.literal_eval(str(value))
print>>f, label, ":", value
print>>f, "\n"
f.close()
except Exception, e:
print e
print "Error Writing the pipeline configuration file %s" % path
raise Exception
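# A minimal launch sketch (assumptions: a display is available for wxPython and
# a pipeline YAML config exists at the placeholder path below).
if __name__ == '__main__':
    app = wx.App(False)
    MainFrame(None, option='load', path='pipeline_config.yml')
    app.MainLoop()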
| bsd-3-clause | 7,788,789,616,095,713,000 | 36.337864 | 129 | 0.483228 | false |
mweisman/QGIS | python/plugins/processing/admintools/AdminToolsAlgorithmProvider.py | 4 | 2875 | # -*- coding: utf-8 -*-
"""
***************************************************************************
AdminToolsAlgorithmProvider.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import QtGui
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.admintools.PostGISExecuteSQL import PostGISExecuteSQL
from processing.admintools.ImportIntoPostGIS import ImportIntoPostGIS
from processing.admintools.ImportVectorIntoGeoServer import \
ImportVectorIntoGeoServer
from processing.admintools.CreateWorkspace import CreateWorkspace
from processing.admintools.ImportRasterIntoGeoServer import \
ImportRasterIntoGeoServer
from processing.admintools.DeleteWorkspace import DeleteWorkspace
from processing.admintools.DeleteDatastore import DeleteDatastore
from processing.admintools.CreateStyleGeoServer import CreateStyleGeoServer
class AdminToolsAlgorithmProvider(AlgorithmProvider):
def __init__(self):
AlgorithmProvider.__init__(self)
self.alglist = [
ImportVectorIntoGeoServer(),
ImportRasterIntoGeoServer(),
CreateWorkspace(),
DeleteWorkspace(),
DeleteDatastore(),
CreateStyleGeoServer(),
]
try:
self.alglist.append(ImportIntoPostGIS())
self.alglist.append(PostGISExecuteSQL())
except:
pass
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
def unload(self):
AlgorithmProvider.unload(self)
def getName(self):
return 'gspg'
def getDescription(self):
return 'GeoServer/PostGIS tools'
def getIcon(self):
return QtGui.QIcon(os.path.dirname(__file__)
+ '/../images/database.png')
def _loadAlgorithms(self):
self.algs = self.alglist
def supportsNonFileBasedOutput(self):
return False
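# A minimal registration sketch (assumption: the QGIS 2.x Processing plugin
# exposes Processing.addProvider; illustrative only, not part of this provider).
#
#   from processing.core.Processing import Processing
#   Processing.addProvider(AdminToolsAlgorithmProvider())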
| gpl-2.0 | -8,173,116,093,009,160,000 | 33.638554 | 75 | 0.577739 | false |
uwescience/raco | examples/sp2bench/sp2bench_rdf_brackets.py | 1 | 8029 | from emitcode import emitCode
import raco.algebra as algebra
from raco.language import CCAlgebra, GrappaAlgebra
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
tr = "sp2bench_1m"
queries = {}
queries['Q1'] = """A(yr) :- %(tr)s(journal, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Journal>'),
%(tr)s(journal, '<http://purl.org/dc/elements/1.1/title>', '\\"Journal 1 (1940)\\"^^<http://www.w3.org/2001/XMLSchema#string>'),
%(tr)s(journal, '<http://purl.org/dc/terms/issued>', yr)"""
#"""A(inproc, author, booktitle, title, proc, ee, page, url, yr, abstract) :- %(tr)s(inproc, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Inproceedings>'),
queries['Q2'] = """A(inproc, author, booktitle, title, proc, ee, page, url, yr) :- %(tr)s(inproc, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Inproceedings>'),
%(tr)s(inproc, '<http://purl.org/dc/elements/1.1/creator>', author),
%(tr)s(inproc, '<http://localhost/vocabulary/bench/booktitle>', booktitle),
%(tr)s(inproc, '<http://purl.org/dc/elements/1.1/title>', title),
%(tr)s(inproc, '<http://purl.org/dc/terms/partOf>', proc),
%(tr)s(inproc, '<http://www.w3.org/2000/01/rdf-schema#seeAlso>', ee),
%(tr)s(inproc, '<http://swrc.ontoware.org/ontology#pages>', page),
%(tr)s(inproc, '<http://xmlns.com/foaf/0.1/homepage>', url),
%(tr)s(inproc, '<http://purl.org/dc/terms/issued>', yr)"""
# TODO: make abstract optional (can do this with a union)
# TODO: order by yr
queries['Q3a'] = """A(article) :- %(tr)s(article, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
%(tr)s(article, property, value),
property = '<http://swrc.ontoware.org/ontology#pages>'"""
queries['Q3b'] = """A(article) :- %(tr)s(article, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
%(tr)s(article, property, value),
property = '<http://swrc.ontoware.org/ontology#month>'"""
queries['Q3c'] = """A(article) :- %(tr)s(article, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
%(tr)s(article, property, value),
property = '<http://swrc.ontoware.org/ontology#isbn>'"""
#queries['Q4'] = """A(name1, name2) :- %(tr)s(article1, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
queries['Q4'] = """A(name1, name2) :- %(tr)s(article1, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
%(tr)s(article2, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
%(tr)s(article1, '<http://purl.org/dc/elements/1.1/creator>', author1),
%(tr)s(author1, '<http://xmlns.com/foaf/0.1/name>', name1),
%(tr)s(article2, '<http://purl.org/dc/elements/1.1/creator>', author2),
%(tr)s(author2, '<http://xmlns.com/foaf/0.1/name>', name2),
%(tr)s(article1, '<http://swrc.ontoware.org/ontology#journal>', journal),
%(tr)s(article2, '<http://swrc.ontoware.org/ontology#journal>', journal)"""
# TODO: name1<name2 condition (not supported)
# TODO be sure DISTINCT
# syntactically join with equality;
#queries['Q5a'] = """A(person, name) :- %(tr)s(article, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
queries['Q5a'] = """A(person, name) :- %(tr)s(article, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
%(tr)s(article, '<http://purl.org/dc/elements/1.1/creator>', person),
%(tr)s(inproc, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Inproceedings>'),
%(tr)s(inproc, '<http://purl.org/dc/elements/1.1/creator>', person2),
%(tr)s(person, '<http://xmlns.com/foaf/0.1/name>', name),
%(tr)s(person2, '<http://xmlns.com/foaf/0.1/name>', name2),
name = name2"""
# syntactically join with naming
#TODO: include q5b after issue #104 is addressed
#queries['Q5b'] = """A(person, name) :- %(tr)s(article, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
queries['Q5b'] = """A(person, name) :- %(tr)s(article, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Article>'),
%(tr)s(article, '<http://purl.org/dc/elements/1.1/creator>', person),
%(tr)s(inproc, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://localhost/vocabulary/bench/Inproceedings>'),
%(tr)s(inproc, '<http://purl.org/dc/elements/1.1/creator>', person),
%(tr)s(person, '<http://xmlns.com/foaf/0.1/name>', name)"""
# TODO: Q6 requires negation
# TODO: Q7 requires double negation
#TODO: enable Q8, after dealing with HashJoin( $0 != $7 ) type of cases
#queries['Q8'] = """Erdoes(erdoes) :- %(tr)s(erdoes, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://xmlns.com/foaf/0.1/Person>'),
_ = """Erdoes(erdoes) :- %(tr)s(erdoes, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://xmlns.com/foaf/0.1/Person>'),
%(tr)s(erdoes, '<http://xmlns.com/foaf/0.1/name>', '\\"Paul Erdoes\\"^^<http://www.w3.org/2001/XMLSchema#string>')
A(name) :- Erdoes(erdoes),
%(tr)s(doc, '<http://purl.org/dc/elements/1.1/creator>', erdoes),
%(tr)s(doc, '<http://purl.org/dc/elements/1.1/creator>', author),
%(tr)s(doc2, '<http://purl.org/dc/elements/1.1/creator>', author),
%(tr)s(doc2, '<http://purl.org/dc/elements/1.1/creator>', author2),
%(tr)s(author2, '<http://xmlns.com/foaf/0.1/name>', name),
author != erdoes,
doc2 != doc,
author2 != erdoes,
author2 != author
A(name) :- Erdoes(erdoes),
%(tr)s(doc, '<http://purl.org/dc/elements/1.1/creator>', erdoes),
%(tr)s(doc, '<http://purl.org/dc/elements/1.1/creator>', author),
%(tr)s(author, '<http://xmlns.com/foaf/0.1/name>', name),
author != erdoes"""
#TODO be sure DISTINCT
queries['Q9'] = """A(predicate) :- %(tr)s(person, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://xmlns.com/foaf/0.1/Person>'),
%(tr)s(subject, predicate, person)
A(predicate) :- %(tr)s(person, '<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>', '<http://xmlns.com/foaf/0.1/Person>'),
%(tr)s(person, predicate, object)"""
#TODO be sure DISTINCT
queries['Q10'] = """A(subj, pred) :- %(tr)s(subj, pred, '<http://localhost/persons/Paul_Erdoes>')"""
queries['Q11'] = """A(ee) :- %(tr)s(publication, '<http://www.w3.org/2000/01/rdf-schema#seeAlso>', ee)"""
#TODO order by, limit, offset
alg = CCAlgebra
prefix=""
import sys
if len(sys.argv) > 1:
if sys.argv[1] == "grappa" or sys.argv[1] == "g":
print "using grappa"
alg = GrappaAlgebra
prefix="grappa"
plan = None
if len(sys.argv) > 2:
plan = sys.argv[2]
for name, query in queries.iteritems():
query = query % locals()
lst = []
if prefix: lst.append(prefix)
if plan: lst.append(plan)
if name: lst.append(name)
emitCode(query, "_".join(lst), alg, plan)
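# Usage sketch, derived from the argv handling above (output naming follows emitCode):
#     python sp2bench_rdf_brackets.py              # emit code for every query with CCAlgebra
#     python sp2bench_rdf_brackets.py grappa       # use the GrappaAlgebra backend instead
#     python sp2bench_rdf_brackets.py grappa PLAN  # additionally tag the output with PLAN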
| bsd-3-clause | 6,690,524,987,756,424,000 | 60.290076 | 204 | 0.567318 | false |
valdecdev/odoo | addons/fetchmail/fetchmail.py | 1 | 14764 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import time
from imaplib import IMAP4
from imaplib import IMAP4_SSL
from poplib import POP3
from poplib import POP3_SSL
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import zipfile
import base64
from openerp import addons
from openerp.osv import fields, osv
from openerp import tools, api
from openerp.tools.translate import _
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
MAX_POP_MESSAGES = 50
class fetchmail_server(osv.osv):
"""Incoming POP/IMAP mail server account"""
_name = 'fetchmail.server'
_description = "POP/IMAP Server"
_order = 'priority'
_columns = {
'name':fields.char('Name', required=True, readonly=False),
'active':fields.boolean('Active', required=False),
'state':fields.selection([
('draft', 'Not Confirmed'),
('done', 'Confirmed'),
], 'Status', select=True, readonly=True, copy=False),
'server' : fields.char('Server Name', readonly=True, help="Hostname or IP of the mail server", states={'draft':[('readonly', False)]}),
'port' : fields.integer('Port', readonly=True, states={'draft':[('readonly', False)]}),
'type':fields.selection([
('pop', 'POP Server'),
('imap', 'IMAP Server'),
('local', 'Local Server'),
], 'Server Type', select=True, required=True, readonly=False),
'is_ssl':fields.boolean('SSL/TLS', help="Connections are encrypted with SSL/TLS through a dedicated port (default: IMAPS=993, POP3S=995)"),
'attach':fields.boolean('Keep Attachments', help="Whether attachments should be downloaded. "
"If not enabled, incoming emails will be stripped of any attachments before being processed"),
        'original':fields.boolean('Keep Original', help="Whether a full original copy of each email should be kept for reference "
"and attached to each processed message. This will usually double the size of your message database."),
'date': fields.datetime('Last Fetch Date', readonly=True),
'user' : fields.char('Username', readonly=True, states={'draft':[('readonly', False)]}),
'password' : fields.char('Password', readonly=True, states={'draft':[('readonly', False)]}),
'action_id':fields.many2one('ir.actions.server', 'Server Action', help="Optional custom server action to trigger for each incoming mail, "
"on the record that was created or updated by this mail"),
'object_id': fields.many2one('ir.model', "Create a New Record", help="Process each incoming mail as part of a conversation "
"corresponding to this document type. This will create "
"new documents for new conversations, or attach follow-up "
"emails to the existing conversations (documents)."),
'priority': fields.integer('Server Priority', readonly=True, states={'draft':[('readonly', False)]}, help="Defines the order of processing, "
"lower values mean higher priority"),
'message_ids': fields.one2many('mail.mail', 'fetchmail_server_id', 'Messages', readonly=True),
'configuration' : fields.text('Configuration', readonly=True),
'script' : fields.char('Script', readonly=True),
}
_defaults = {
'state': "draft",
'type': "pop",
'active': True,
'priority': 5,
'attach': True,
'script': '/mail/static/scripts/openerp_mailgate.py',
}
def onchange_server_type(self, cr, uid, ids, server_type=False, ssl=False, object_id=False):
port = 0
values = {}
if server_type == 'pop':
port = ssl and 995 or 110
elif server_type == 'imap':
port = ssl and 993 or 143
else:
values['server'] = ''
values['port'] = port
conf = {
'dbname' : cr.dbname,
'uid' : uid,
'model' : 'MODELNAME',
}
if object_id:
m = self.pool.get('ir.model')
r = m.read(cr,uid,[object_id],['model'])
conf['model']=r[0]['model']
values['configuration'] = """Use the below script with the following command line options with your Mail Transport Agent (MTA)
openerp_mailgate.py --host=HOSTNAME --port=PORT -u %(uid)d -p PASSWORD -d %(dbname)s
Example configuration for the postfix mta running locally:
/etc/postfix/virtual_aliases:
@yourdomain openerp_mailgate@localhost
/etc/aliases:
openerp_mailgate: "|/path/to/openerp-mailgate.py --host=localhost -u %(uid)d -p PASSWORD -d %(dbname)s"
""" % conf
return {'value':values}
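    # Defaults applied by onchange_server_type above: POP3 -> 110 (995 with SSL/TLS),
    # IMAP -> 143 (993 with SSL/TLS); the 'local' type clears the server field.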
def set_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids , {'state':'draft'})
return True
@api.cr_uid_ids_context
def connect(self, cr, uid, server_id, context=None):
if isinstance(server_id, (list,tuple)):
server_id = server_id[0]
server = self.browse(cr, uid, server_id, context)
if server.type == 'imap':
if server.is_ssl:
connection = IMAP4_SSL(server.server, int(server.port))
else:
connection = IMAP4(server.server, int(server.port))
connection.login(server.user, server.password)
elif server.type == 'pop':
if server.is_ssl:
connection = POP3_SSL(server.server, int(server.port))
else:
connection = POP3(server.server, int(server.port))
#TODO: use this to remove only unread messages
#connection.user("recent:"+server.user)
connection.user(server.user)
connection.pass_(server.password)
return connection
def button_confirm_login(self, cr, uid, ids, context=None):
if context is None:
context = {}
for server in self.browse(cr, uid, ids, context=context):
try:
connection = server.connect()
server.write({'state':'done'})
except Exception, e:
_logger.info("Failed to connect to %s server %s.", server.type, server.name, exc_info=True)
raise UserError(_("Connection test failed: %s") % tools.ustr(e))
finally:
try:
if connection:
if server.type == 'imap':
connection.close()
elif server.type == 'pop':
connection.quit()
except Exception:
# ignored, just a consequence of the previous exception
pass
return True
def _fetch_mails(self, cr, uid, ids=False, context=None):
if not ids:
ids = self.search(cr, uid, [('state','=','done'),('type','in',['pop','imap'])])
return self.fetch_mail(cr, uid, ids, context=context)
def fetch_mail(self, cr, uid, ids, context=None):
"""WARNING: meant for cron usage only - will commit() after each email!"""
context = dict(context or {})
context['fetchmail_cron_running'] = True
mail_thread = self.pool.get('mail.thread')
action_pool = self.pool.get('ir.actions.server')
for server in self.browse(cr, uid, ids, context=context):
_logger.info('start checking for new emails on %s server %s', server.type, server.name)
context.update({'fetchmail_server_id': server.id, 'server_type': server.type})
count, failed = 0, 0
imap_server = False
pop_server = False
if server.type == 'imap':
try:
imap_server = server.connect()
imap_server.select()
result, data = imap_server.search(None, '(UNSEEN)')
for num in data[0].split():
res_id = None
result, data = imap_server.fetch(num, '(RFC822)')
imap_server.store(num, '-FLAGS', '\\Seen')
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
data[0][1],
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
except Exception:
_logger.info('Failed to process mail from %s server %s.', server.type, server.name, exc_info=True)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
imap_server.store(num, '+FLAGS', '\\Seen')
cr.commit()
count += 1
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", count, server.type, server.name, (count - failed), failed)
except Exception:
_logger.info("General failure when trying to fetch mail from %s server %s.", server.type, server.name, exc_info=True)
finally:
if imap_server:
imap_server.close()
imap_server.logout()
elif server.type == 'pop':
try:
while True:
pop_server = server.connect()
(numMsgs, totalSize) = pop_server.stat()
pop_server.list()
for num in range(1, min(MAX_POP_MESSAGES, numMsgs) + 1):
(header, msges, octets) = pop_server.retr(num)
msg = '\n'.join(msges)
res_id = None
try:
res_id = mail_thread.message_process(cr, uid, server.object_id.model,
msg,
save_original=server.original,
strip_attachments=(not server.attach),
context=context)
pop_server.dele(num)
except Exception:
_logger.info('Failed to process mail from %s server %s.', server.type, server.name, exc_info=True)
failed += 1
if res_id and server.action_id:
action_pool.run(cr, uid, [server.action_id.id], {'active_id': res_id, 'active_ids': [res_id], 'active_model': context.get("thread_model", server.object_id.model)})
cr.commit()
if numMsgs < MAX_POP_MESSAGES:
break
pop_server.quit()
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", numMsgs, server.type, server.name, (numMsgs - failed), failed)
except Exception:
_logger.info("General failure when trying to fetch mail from %s server %s.", server.type, server.name, exc_info=True)
finally:
if pop_server:
pop_server.quit()
server.write({'date': time.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)})
return True
def _update_cron(self, cr, uid, context=None):
if context and context.get('fetchmail_cron_running'):
return
try:
cron = self.pool['ir.model.data'].get_object(
cr, uid, 'fetchmail', 'ir_cron_mail_gateway_action', context=context)
except ValueError:
# Nevermind if default cron cannot be found
return
# Enabled/Disable cron based on the number of 'done' server of type pop or imap
cron.toggle(model=self._name, domain=[('state','=','done'), ('type','in',['pop','imap'])])
def create(self, cr, uid, values, context=None):
res = super(fetchmail_server, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(fetchmail_server, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(fetchmail_server, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class mail_mail(osv.osv):
_inherit = "mail.mail"
_columns = {
'fetchmail_server_id': fields.many2one('fetchmail.server', "Inbound Mail Server",
readonly=True,
select=True,
oldname='server_id'),
}
def create(self, cr, uid, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).create(cr, uid, values, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
if context is None:
context = {}
fetchmail_server_id = context.get('fetchmail_server_id')
if fetchmail_server_id:
values['fetchmail_server_id'] = fetchmail_server_id
res = super(mail_mail, self).write(cr, uid, ids, values, context=context)
return res
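# Illustrative sketch (not part of the original module; the cr/uid/registry names are
# assumptions): the addon's scheduled action calls fetchmail_server._fetch_mails, which
# is roughly equivalent to the following manual trigger from a server-side context:
#
#     servers = registry['fetchmail.server']
#     ids = servers.search(cr, uid, [('state', '=', 'done'), ('type', 'in', ['pop', 'imap'])])
#     servers.fetch_mail(cr, uid, ids)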
| agpl-3.0 | 7,128,425,773,505,944,000 | 48.878378 | 195 | 0.519236 | false |
yamt/tempest | tempest/api/orchestration/stacks/test_neutron_resources.py | 5 | 8944 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
from tempest_lib.common.utils import data_utils
from tempest.api.orchestration import base
from tempest import clients
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
@classmethod
def skip_checks(cls):
super(NeutronResourcesTestJSON, cls).skip_checks()
if not CONF.service_available.neutron:
raise cls.skipException("Neutron support is required")
@classmethod
def setup_credentials(cls):
super(NeutronResourcesTestJSON, cls).setup_credentials()
cls.os = clients.Manager()
@classmethod
def setup_clients(cls):
super(NeutronResourcesTestJSON, cls).setup_clients()
cls.network_client = cls.os.network_client
@classmethod
def resource_setup(cls):
super(NeutronResourcesTestJSON, cls).resource_setup()
cls.neutron_basic_template = cls.load_template('neutron_basic')
cls.stack_name = data_utils.rand_name('heat')
template = cls.read_template('neutron_basic')
cls.keypair_name = (CONF.orchestration.keypair_name or
cls._create_keypair()['name'])
cls.external_network_id = CONF.network.public_network_id
tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
cls.subnet_cidr = tenant_cidr.subnet(mask_bits).next()
# create the stack
cls.stack_identifier = cls.create_stack(
cls.stack_name,
template,
parameters={
'KeyName': cls.keypair_name,
'InstanceType': CONF.orchestration.instance_type,
'ImageId': CONF.compute.image_ref,
'ExternalNetworkId': cls.external_network_id,
'timeout': CONF.orchestration.build_timeout,
'DNSServers': CONF.network.dns_servers,
'SubNetCidr': str(cls.subnet_cidr)
})
cls.stack_id = cls.stack_identifier.split('/')[1]
try:
cls.client.wait_for_stack_status(cls.stack_id, 'CREATE_COMPLETE')
resources = cls.client.list_resources(cls.stack_identifier)
except exceptions.TimeoutException as e:
if CONF.compute_feature_enabled.console_output:
# attempt to log the server console to help with debugging
# the cause of the server not signalling the waitcondition
# to heat.
body = cls.client.show_resource(cls.stack_identifier,
'Server')
server_id = body['physical_resource_id']
LOG.debug('Console output for %s', server_id)
output = cls.servers_client.get_console_output(
server_id, None).data
LOG.debug(output)
raise e
cls.test_resources = {}
for resource in resources:
cls.test_resources[resource['logical_resource_id']] = resource
@test.idempotent_id('f9e2664c-bc44-4eef-98b6-495e4f9d74b3')
def test_created_resources(self):
"""Verifies created neutron resources."""
resources = [('Network', self.neutron_basic_template['resources'][
'Network']['type']),
('Subnet', self.neutron_basic_template['resources'][
'Subnet']['type']),
('RouterInterface', self.neutron_basic_template[
'resources']['RouterInterface']['type']),
('Server', self.neutron_basic_template['resources'][
'Server']['type'])]
for resource_name, resource_type in resources:
resource = self.test_resources.get(resource_name, None)
self.assertIsInstance(resource, dict)
self.assertEqual(resource_name, resource['logical_resource_id'])
self.assertEqual(resource_type, resource['resource_type'])
self.assertEqual('CREATE_COMPLETE', resource['resource_status'])
@test.idempotent_id('c572b915-edb1-4e90-b196-c7199a6848c0')
@test.services('network')
def test_created_network(self):
"""Verifies created network."""
network_id = self.test_resources.get('Network')['physical_resource_id']
body = self.network_client.show_network(network_id)
network = body['network']
self.assertIsInstance(network, dict)
self.assertEqual(network_id, network['id'])
self.assertEqual(self.neutron_basic_template['resources'][
'Network']['properties']['name'], network['name'])
@test.idempotent_id('e8f84b96-f9d7-4684-ad5f-340203e9f2c2')
@test.services('network')
def test_created_subnet(self):
"""Verifies created subnet."""
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
body = self.network_client.show_subnet(subnet_id)
subnet = body['subnet']
network_id = self.test_resources.get('Network')['physical_resource_id']
self.assertEqual(subnet_id, subnet['id'])
self.assertEqual(network_id, subnet['network_id'])
self.assertEqual(self.neutron_basic_template['resources'][
'Subnet']['properties']['name'], subnet['name'])
self.assertEqual(sorted(CONF.network.dns_servers),
sorted(subnet['dns_nameservers']))
self.assertEqual(self.neutron_basic_template['resources'][
'Subnet']['properties']['ip_version'], subnet['ip_version'])
self.assertEqual(str(self.subnet_cidr), subnet['cidr'])
@test.idempotent_id('96af4c7f-5069-44bc-bdcf-c0390f8a67d1')
@test.services('network')
def test_created_router(self):
"""Verifies created router."""
router_id = self.test_resources.get('Router')['physical_resource_id']
body = self.network_client.show_router(router_id)
router = body['router']
self.assertEqual(self.neutron_basic_template['resources'][
'Router']['properties']['name'], router['name'])
self.assertEqual(self.external_network_id,
router['external_gateway_info']['network_id'])
self.assertEqual(True, router['admin_state_up'])
@test.idempotent_id('89f605bd-153e-43ee-a0ed-9919b63423c5')
@test.services('network')
def test_created_router_interface(self):
"""Verifies created router interface."""
router_id = self.test_resources.get('Router')['physical_resource_id']
network_id = self.test_resources.get('Network')['physical_resource_id']
subnet_id = self.test_resources.get('Subnet')['physical_resource_id']
body = self.network_client.list_ports()
ports = body['ports']
router_ports = filter(lambda port: port['device_id'] ==
router_id, ports)
created_network_ports = filter(lambda port: port['network_id'] ==
network_id, router_ports)
self.assertEqual(1, len(created_network_ports))
router_interface = created_network_ports[0]
fixed_ips = router_interface['fixed_ips']
subnet_fixed_ips = filter(lambda port: port['subnet_id'] ==
subnet_id, fixed_ips)
self.assertEqual(1, len(subnet_fixed_ips))
router_interface_ip = subnet_fixed_ips[0]['ip_address']
self.assertEqual(str(self.subnet_cidr.iter_hosts().next()),
router_interface_ip)
@test.idempotent_id('75d85316-4ac2-4c0e-a1a9-edd2148fc10e')
@test.services('compute', 'network')
def test_created_server(self):
"""Verifies created sever."""
server_id = self.test_resources.get('Server')['physical_resource_id']
server = self.servers_client.get_server(server_id)
self.assertEqual(self.keypair_name, server['key_name'])
self.assertEqual('ACTIVE', server['status'])
network = server['addresses'][self.neutron_basic_template['resources'][
'Network']['properties']['name']][0]
self.assertEqual(4, network['version'])
self.assertIn(netaddr.IPAddress(network['addr']), self.subnet_cidr)
| apache-2.0 | 9,194,123,329,594,989,000 | 45.583333 | 79 | 0.621758 | false |
hasecbinusr/pysal | pysal/spreg/diagnostics_sp.py | 9 | 22185 | """
Spatial diagnostics module
"""
__author__ = "Luc Anselin [email protected], Daniel Arribas-Bel [email protected]"
from utils import spdot
from scipy.stats.stats import chisqprob
from scipy.stats import norm
import numpy as np
import numpy.linalg as la
__all__ = ['LMtests', 'MoranRes', 'AKtest']
class LMtests:
"""
Lagrange Multiplier tests. Implemented as presented in Anselin et al.
(1996) [Anselin1996a]_
...
    Parameters
----------
ols : OLS
OLS regression object
w : W
Spatial weights instance
tests : list
Lists of strings with the tests desired to be performed.
Values may be:
* 'all': runs all the options (default)
* 'lme': LM error test
* 'rlme': Robust LM error test
* 'lml' : LM lag test
* 'rlml': Robust LM lag test
    Attributes
----------
lme : tuple
(Only if 'lme' or 'all' was in tests). Pair of statistic and
p-value for the LM error test.
lml : tuple
(Only if 'lml' or 'all' was in tests). Pair of statistic and
p-value for the LM lag test.
rlme : tuple
(Only if 'rlme' or 'all' was in tests). Pair of statistic
and p-value for the Robust LM error test.
rlml : tuple
(Only if 'rlml' or 'all' was in tests). Pair of statistic
and p-value for the Robust LM lag test.
sarma : tuple
(Only if 'rlml' or 'all' was in tests). Pair of statistic
and p-value for the SARMA test.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> from ols import OLS
Open the csv file to access the data for analysis
>>> csv = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Pull out from the csv the files we need ('HOVAL' as dependent as well as
'INC' and 'CRIME' as independent) and directly transform them into nx1 and
nx2 arrays, respectively
>>> y = np.array([csv.by_col('HOVAL')]).T
>>> x = np.array([csv.by_col('INC'), csv.by_col('CRIME')]).T
Create the weights object from existing .gal file
>>> w = pysal.open(pysal.examples.get_path('columbus.gal'), 'r').read()
Row-standardize the weight object (not required although desirable in some
cases)
>>> w.transform='r'
Run an OLS regression
>>> ols = OLS(y, x)
Run all the LM tests in the residuals. These diagnostics test for the
presence of remaining spatial autocorrelation in the residuals of an OLS
    model and give an indication of the type of spatial model. There are five
    types: presence of a spatial lag model (simple and robust version),
    presence of a spatial error model (simple and robust version), and joint presence
    of both a spatial lag and a spatial error model.
>>> lms = pysal.spreg.diagnostics_sp.LMtests(ols, w)
LM error test:
>>> print round(lms.lme[0],4), round(lms.lme[1],4)
3.0971 0.0784
LM lag test:
>>> print round(lms.lml[0],4), round(lms.lml[1],4)
0.9816 0.3218
Robust LM error test:
>>> print round(lms.rlme[0],4), round(lms.rlme[1],4)
3.2092 0.0732
Robust LM lag test:
>>> print round(lms.rlml[0],4), round(lms.rlml[1],4)
1.0936 0.2957
LM SARMA test:
>>> print round(lms.sarma[0],4), round(lms.sarma[1],4)
4.1907 0.123
"""
def __init__(self, ols, w, tests=['all']):
cache = spDcache(ols, w)
if tests == ['all']:
tests = ['lme', 'lml', 'rlme', 'rlml', 'sarma']
if 'lme' in tests:
self.lme = lmErr(ols, w, cache)
if 'lml' in tests:
self.lml = lmLag(ols, w, cache)
if 'rlme' in tests:
self.rlme = rlmErr(ols, w, cache)
if 'rlml' in tests:
self.rlml = rlmLag(ols, w, cache)
if 'sarma' in tests:
self.sarma = lmSarma(ols, w, cache)
class MoranRes:
"""
Moran's I for spatial autocorrelation in residuals from OLS regression
...
Parameters
----------
ols : OLS
OLS regression object
w : W
Spatial weights instance
z : boolean
If set to True computes attributes eI, vI and zI. Due to computational burden of vI, defaults to False.
Attributes
----------
I : float
Moran's I statistic
eI : float
Moran's I expectation
vI : float
Moran's I variance
zI : float
Moran's I standardized value
Examples
--------
>>> import numpy as np
>>> import pysal
>>> from ols import OLS
Open the csv file to access the data for analysis
>>> csv = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Pull out from the csv the files we need ('HOVAL' as dependent as well as
'INC' and 'CRIME' as independent) and directly transform them into nx1 and
nx2 arrays, respectively
>>> y = np.array([csv.by_col('HOVAL')]).T
>>> x = np.array([csv.by_col('INC'), csv.by_col('CRIME')]).T
Create the weights object from existing .gal file
>>> w = pysal.open(pysal.examples.get_path('columbus.gal'), 'r').read()
Row-standardize the weight object (not required although desirable in some
cases)
>>> w.transform='r'
Run an OLS regression
>>> ols = OLS(y, x)
Run Moran's I test for residual spatial autocorrelation in an OLS model.
This computes the traditional statistic applying a correction in the
expectation and variance to account for the fact it comes from residuals
instead of an independent variable
>>> m = pysal.spreg.diagnostics_sp.MoranRes(ols, w, z=True)
Value of the Moran's I statistic:
>>> print round(m.I,4)
0.1713
Value of the Moran's I expectation:
>>> print round(m.eI,4)
-0.0345
Value of the Moran's I variance:
>>> print round(m.vI,4)
0.0081
Value of the Moran's I standardized value. This is
distributed as a standard Normal(0, 1)
>>> print round(m.zI,4)
2.2827
P-value of the standardized Moran's I value (z):
>>> print round(m.p_norm,4)
0.0224
"""
def __init__(self, ols, w, z=False):
cache = spDcache(ols, w)
self.I = get_mI(ols, w, cache)
if z:
self.eI = get_eI(ols, w, cache)
self.vI = get_vI(ols, w, self.eI, cache)
self.zI, self.p_norm = get_zI(self.I, self.eI, self.vI)
class AKtest:
"""
Moran's I test of spatial autocorrelation for IV estimation.
Implemented following the original reference Anselin and Kelejian
(1997) [Anselin1997]_
...
Parameters
----------
iv : TSLS
Regression object from TSLS class
w : W
Spatial weights instance
case : string
Flag for special cases (default to 'nosp'):
* 'nosp': Only NO spatial end. reg.
* 'gen': General case (spatial lag + end. reg.)
Attributes
----------
mi : float
Moran's I statistic for IV residuals
ak : float
Square of corrected Moran's I for residuals::
.. math::
ak = \dfrac{N \times I^*}{\phi^2}
Note: if case='nosp' then it simplifies to the LMerror
p : float
P-value of the test
Examples
--------
We first need to import the needed modules. Numpy is needed to convert the
data we read into arrays that ``spreg`` understands and ``pysal`` to
perform all the analysis. The TSLS is required to run the model on
which we will perform the tests.
>>> import numpy as np
>>> import pysal
>>> from twosls import TSLS
>>> from twosls_sp import GM_Lag
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
Before being able to apply the diagnostics, we have to run a model and,
for that, we need the input variables. Extract the CRIME column (crime
rates) from the DBF file and make it the dependent variable for the
    regression. Note that PySAL requires this to be a numpy array of shape
(n, 1) as opposed to the also common shape of (n, ) that other packages
accept.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this model adds a vector of ones to the
independent variables passed in, but this can be overridden by passing
constant=False.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
In this case, we consider HOVAL (home value) as an endogenous regressor,
so we acknowledge that by reading it in a different category.
>>> yd = []
>>> yd.append(db.by_col("HOVAL"))
>>> yd = np.array(yd).T
In order to properly account for the endogeneity, we have to pass in the
instruments. Let us consider DISCBD (distance to the CBD) is a good one:
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
Now we are good to run the model. It is an easy one line task.
>>> reg = TSLS(y, X, yd, q=q)
Now we are concerned with whether our non-spatial model presents spatial
autocorrelation in the residuals. To assess this possibility, we can run
the Anselin-Kelejian test, which is a version of the classical LM error
test adapted for the case of residuals from an instrumental variables (IV)
regression. First we need an extra object, the weights matrix, which
includes the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
things, this allows to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are good to run the test. It is a very simple task:
>>> ak = AKtest(reg, w)
And explore the information obtained:
>>> print('AK test: %f\tP-value: %f'%(ak.ak, ak.p))
AK test: 4.642895 P-value: 0.031182
    The test also accommodates the case when the residuals come from an IV
regression that includes a spatial lag of the dependent variable. The only
requirement needed is to modify the ``case`` parameter when we call
``AKtest``. First, let us run a spatial lag model:
>>> reg_lag = GM_Lag(y, X, yd, q=q, w=w)
And now we can run the AK test and obtain similar information as in the
non-spatial model.
>>> ak_sp = AKtest(reg, w, case='gen')
>>> print('AK test: %f\tP-value: %f'%(ak_sp.ak, ak_sp.p))
AK test: 1.157593 P-value: 0.281965
"""
def __init__(self, iv, w, case='nosp'):
if case == 'gen':
cache = spDcache(iv, w)
self.mi, self.ak, self.p = akTest(iv, w, cache)
elif case == 'nosp':
cache = spDcache(iv, w)
self.mi = get_mI(iv, w, cache)
self.ak, self.p = lmErr(iv, w, cache)
else:
print """\n
Fix the optional argument 'case' to match the requirements:
* 'gen': General case (spatial lag + end. reg.)
* 'nosp': No spatial end. reg.
\n"""
class spDcache:
"""
Helper class to compute reusable pieces in the spatial diagnostics module
...
Parameters
----------
reg : OLS_dev, TSLS_dev, STSLS_dev
Instance from a regression class
w : W
Spatial weights instance
Attributes
----------
j : array
1x1 array with the result from:
.. math::
J = \dfrac{1}{[(WX\beta)' M (WX\beta) + T \sigma^2]}
wu : array
nx1 array with spatial lag of the residuals
utwuDs : array
1x1 array with the result from:
.. math::
utwuDs = \dfrac{u' W u}{\tilde{\sigma^2}}
utwyDs : array
1x1 array with the result from:
.. math::
utwyDs = \dfrac{u' W y}{\tilde{\sigma^2}}
t : array
1x1 array with the result from :
.. math::
T = tr[(W' + W) W]
trA : float
Trace of A as in Cliff & Ord (1981)
"""
def __init__(self, reg, w):
self.reg = reg
self.w = w
self._cache = {}
@property
def j(self):
if 'j' not in self._cache:
wxb = self.w.sparse * self.reg.predy
wxb2 = np.dot(wxb.T, wxb)
xwxb = spdot(self.reg.x.T, wxb)
num1 = wxb2 - np.dot(xwxb.T, np.dot(self.reg.xtxi, xwxb))
num = num1 + (self.t * self.reg.sig2n)
den = self.reg.n * self.reg.sig2n
self._cache['j'] = num / den
return self._cache['j']
@property
def wu(self):
if 'wu' not in self._cache:
self._cache['wu'] = self.w.sparse * self.reg.u
return self._cache['wu']
@property
def utwuDs(self):
if 'utwuDs' not in self._cache:
res = np.dot(self.reg.u.T, self.wu) / self.reg.sig2n
self._cache['utwuDs'] = res
return self._cache['utwuDs']
@property
def utwyDs(self):
if 'utwyDs' not in self._cache:
res = np.dot(self.reg.u.T, self.w.sparse * self.reg.y)
self._cache['utwyDs'] = res / self.reg.sig2n
return self._cache['utwyDs']
@property
def t(self):
if 't' not in self._cache:
prod = (self.w.sparse.T + self.w.sparse) * self.w.sparse
self._cache['t'] = np.sum(prod.diagonal())
return self._cache['t']
@property
def trA(self):
if 'trA' not in self._cache:
xtwx = spdot(self.reg.x.T, spdot(self.w.sparse, self.reg.x))
mw = np.dot(self.reg.xtxi, xtwx)
self._cache['trA'] = np.sum(mw.diagonal())
return self._cache['trA']
@property
def AB(self):
"""
Computes A and B matrices as in Cliff-Ord 1981, p. 203
"""
if 'AB' not in self._cache:
U = (self.w.sparse + self.w.sparse.T) / 2.
z = spdot(U, self.reg.x, array_out=False)
c1 = spdot(self.reg.x.T, z, array_out=False)
c2 = spdot(z.T, z, array_out=False)
G = self.reg.xtxi
A = spdot(G, c1)
B = spdot(G, c2)
self._cache['AB'] = [A, B]
return self._cache['AB']
def lmErr(reg, w, spDcache):
"""
LM error test. Implemented as presented in eq. (9) of Anselin et al.
(1996) [Anselin1996a]_
...
Attributes
----------
reg : OLS_dev, TSLS_dev, STSLS_dev
Instance from a regression class
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
lme : tuple
Pair of statistic and p-value for the LM error test.
"""
lm = spDcache.utwuDs ** 2 / spDcache.t
pval = chisqprob(lm, 1)
return (lm[0][0], pval[0][0])
def lmLag(ols, w, spDcache):
"""
LM lag test. Implemented as presented in eq. (13) of Anselin et al.
(1996) [Anselin1996a]_
...
Attributes
----------
ols : OLS_dev
Instance from an OLS_dev regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
lml : tuple
Pair of statistic and p-value for the LM lag test.
"""
lm = spDcache.utwyDs ** 2 / (ols.n * spDcache.j)
pval = chisqprob(lm, 1)
return (lm[0][0], pval[0][0])
def rlmErr(ols, w, spDcache):
"""
Robust LM error test. Implemented as presented in eq. (8) of Anselin et
al. (1996) [Anselin1996a]_
NOTE: eq. (8) has an errata, the power -1 in the denominator should be inside the square bracket.
...
Attributes
----------
ols : OLS_dev
Instance from an OLS_dev regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
rlme : tuple
Pair of statistic and p-value for the Robust LM error test.
"""
nj = ols.n * spDcache.j
num = (spDcache.utwuDs - (spDcache.t * spDcache.utwyDs) / nj) ** 2
den = spDcache.t * (1. - (spDcache.t / nj))
lm = num / den
pval = chisqprob(lm, 1)
return (lm[0][0], pval[0][0])
def rlmLag(ols, w, spDcache):
"""
Robust LM lag test. Implemented as presented in eq. (12) of Anselin et al.
(1996) [Anselin1996a]_
...
Attributes
----------
ols : OLS_dev
Instance from an OLS_dev regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
rlml : tuple
Pair of statistic and p-value for the Robust LM lag test.
"""
lm = (spDcache.utwyDs - spDcache.utwuDs) ** 2 / \
((ols.n * spDcache.j) - spDcache.t)
pval = chisqprob(lm, 1)
return (lm[0][0], pval[0][0])
def lmSarma(ols, w, spDcache):
"""
LM error test. Implemented as presented in eq. (15) of Anselin et al.
(1996) [Anselin1996a]_
...
Attributes
----------
ols : OLS_dev
Instance from an OLS_dev regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
sarma : tuple
Pair of statistic and p-value for the LM sarma test.
"""
first = (spDcache.utwyDs - spDcache.utwuDs) ** 2 / \
(w.n * spDcache.j - spDcache.t)
secnd = spDcache.utwuDs ** 2 / spDcache.t
lm = first + secnd
pval = chisqprob(lm, 2)
return (lm[0][0], pval[0][0])
def get_mI(reg, w, spDcache):
"""
Moran's I statistic of spatial autocorrelation as showed in Cliff & Ord
(1981) [Cliff1981]_, p. 201-203
...
Attributes
----------
reg : OLS_dev, TSLS_dev, STSLS_dev
Instance from a regression class
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
moran : float
Statistic Moran's I test.
"""
mi = (w.n * np.dot(reg.u.T, spDcache.wu)) / (w.s0 * reg.utu)
return mi[0][0]
def get_vI(ols, w, ei, spDcache):
"""
Moran's I variance coded as in Cliff & Ord 1981 (p. 201-203) and R's spdep
"""
A = spDcache.AB[0]
trA2 = np.dot(A, A)
trA2 = np.sum(trA2.diagonal())
B = spDcache.AB[1]
trB = np.sum(B.diagonal()) * 4.
vi = (w.n ** 2 / (w.s0 ** 2 * (w.n - ols.k) * (w.n - ols.k + 2.))) * \
(w.s1 + 2. * trA2 - trB -
((2. * (spDcache.trA ** 2)) / (w.n - ols.k)))
return vi
def get_eI(ols, w, spDcache):
"""
Moran's I expectation using matrix M
"""
return - (w.n * spDcache.trA) / (w.s0 * (w.n - ols.k))
def get_zI(I, ei, vi):
"""
Standardized I
Returns two-sided p-values as provided in the GeoDa family
"""
z = abs((I - ei) / np.sqrt(vi))
pval = norm.sf(z) * 2.
return (z, pval)
def akTest(iv, w, spDcache):
"""
Computes AK-test for the general case (end. reg. + sp. lag)
...
Parameters
----------
iv : STSLS_dev
Instance from spatial 2SLS regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Attributes
----------
mi : float
Moran's I statistic for IV residuals
ak : float
Square of corrected Moran's I for residuals::
.. math::
ak = \dfrac{N \times I^*}{\phi^2}
p : float
P-value of the test
ToDo:
* Code in as Nancy
* Compare both
"""
mi = get_mI(iv, w, spDcache)
# Phi2
etwz = spdot(iv.u.T, spdot(w.sparse, iv.z))
a = np.dot(etwz, np.dot(iv.varb, etwz.T))
s12 = (w.s0 / w.n) ** 2
phi2 = (spDcache.t + (4.0 / iv.sig2n) * a) / (s12 * w.n)
ak = w.n * mi ** 2 / phi2
pval = chisqprob(ak, 1)
return (mi, ak[0][0], pval[0][0])
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
| bsd-3-clause | -4,186,144,807,814,660,600 | 27.333333 | 121 | 0.543791 | false |
SivilTaram/edx-platform | common/lib/xmodule/xmodule/seq_module.py | 30 | 12026 | """
xModule implementation of a learning sequence
"""
# pylint: disable=abstract-method
import json
import logging
import warnings
from lxml import etree
from xblock.core import XBlock
from xblock.fields import Integer, Scope, Boolean
from xblock.fragment import Fragment
from pkg_resources import resource_string
from .exceptions import NotFoundError
from .fields import Date
from .mako_module import MakoModuleDescriptor
from .progress import Progress
from .x_module import XModule, STUDENT_VIEW
from .xml_module import XmlDescriptor
log = logging.getLogger(__name__)
# HACK: This shouldn't be hard-coded to two types
# OBSOLETE: This obsoletes 'type'
class_priority = ['video', 'problem']
# Make '_' a no-op so we can scrape strings
_ = lambda text: text
class SequenceFields(object):
has_children = True
# NOTE: Position is 1-indexed. This is silly, but there are now student
# positions saved on prod, so it's not easy to fix.
position = Integer(help="Last tab viewed in this sequence", scope=Scope.user_state)
due = Date(
display_name=_("Due Date"),
help=_("Enter the date by which problems are due."),
scope=Scope.settings,
)
# Entrance Exam flag -- see cms/contentstore/views/entrance_exam.py for usage
is_entrance_exam = Boolean(
display_name=_("Is Entrance Exam"),
help=_(
"Tag this course module as an Entrance Exam. "
"Note, you must enable Entrance Exams for this course setting to take effect."
),
default=False,
scope=Scope.content,
)
class ProctoringFields(object):
"""
Fields that are specific to Proctored or Timed Exams
"""
is_time_limited = Boolean(
display_name=_("Is Time Limited"),
help=_(
"This setting indicates whether students have a limited time"
" to view or interact with this courseware component."
),
default=False,
scope=Scope.settings,
)
default_time_limit_minutes = Integer(
display_name=_("Time Limit in Minutes"),
help=_(
"The number of minutes available to students for viewing or interacting with this courseware component."
),
default=None,
scope=Scope.settings,
)
is_proctored_enabled = Boolean(
display_name=_("Is Proctoring Enabled"),
help=_(
"This setting indicates whether this exam is a proctored exam."
),
default=False,
scope=Scope.settings,
)
is_practice_exam = Boolean(
display_name=_("Is Practice Exam"),
help=_(
"This setting indicates whether this exam is for testing purposes only. Practice exams are not verified."
),
default=False,
scope=Scope.settings,
)
@XBlock.wants('proctoring')
@XBlock.wants('credit')
class SequenceModule(SequenceFields, ProctoringFields, XModule):
''' Layout module which lays out content in a temporal sequence
'''
js = {
'coffee': [resource_string(__name__, 'js/src/sequence/display.coffee')],
'js': [resource_string(__name__, 'js/src/sequence/display/jquery.sequence.js')],
}
css = {
'scss': [resource_string(__name__, 'css/sequence/display.scss')],
}
js_module_name = "Sequence"
def __init__(self, *args, **kwargs):
super(SequenceModule, self).__init__(*args, **kwargs)
# If position is specified in system, then use that instead.
position = getattr(self.system, 'position', None)
if position is not None:
try:
self.position = int(self.system.position)
except (ValueError, TypeError):
# Check for https://openedx.atlassian.net/browse/LMS-6496
warnings.warn(
"Sequential position cannot be converted to an integer: {pos!r}".format(
pos=self.system.position,
),
RuntimeWarning,
)
def get_progress(self):
''' Return the total progress, adding total done and total available.
(assumes that each submodule uses the same "units" for progress.)
'''
# TODO: Cache progress or children array?
children = self.get_children()
progresses = [child.get_progress() for child in children]
progress = reduce(Progress.add_counts, progresses, None)
return progress
def handle_ajax(self, dispatch, data): # TODO: bounds checking
''' get = request.POST instance '''
if dispatch == 'goto_position':
# set position to default value if either 'position' argument not
# found in request or it is a non-positive integer
position = data.get('position', u'1')
if position.isdigit() and int(position) > 0:
self.position = int(position)
else:
self.position = 1
return json.dumps({'success': True})
raise NotFoundError('Unexpected dispatch type')
def student_view(self, context):
# If we're rendering this sequence, but no position is set yet,
# default the position to the first element
if self.position is None:
self.position = 1
## Returns a set of all types of all sub-children
contents = []
fragment = Fragment()
# Is this sequential part of a timed or proctored exam?
if self.is_time_limited:
view_html = self._time_limited_student_view(context)
# Do we have an alternate rendering
# from the edx_proctoring subsystem?
if view_html:
fragment.add_content(view_html)
return fragment
for child in self.get_display_items():
progress = child.get_progress()
rendered_child = child.render(STUDENT_VIEW, context)
fragment.add_frag_resources(rendered_child)
titles = child.get_content_titles()
childinfo = {
'content': rendered_child.content,
'title': "\n".join(titles),
'page_title': titles[0] if titles else '',
'progress_status': Progress.to_js_status_str(progress),
'progress_detail': Progress.to_js_detail_str(progress),
'type': child.get_icon_class(),
'id': child.scope_ids.usage_id.to_deprecated_string(),
}
if childinfo['title'] == '':
childinfo['title'] = child.display_name_with_default
contents.append(childinfo)
params = {'items': contents,
'element_id': self.location.html_id(),
'item_id': self.location.to_deprecated_string(),
'position': self.position,
'tag': self.location.category,
'ajax_url': self.system.ajax_url,
}
fragment.add_content(self.system.render_template("seq_module.html", params))
return fragment
def _time_limited_student_view(self, context):
"""
Delegated rendering of a student view when in a time
limited view. This ultimately calls down into edx_proctoring
pip installed djangoapp
"""
# None = no overridden view rendering
view_html = None
proctoring_service = self.runtime.service(self, 'proctoring')
credit_service = self.runtime.service(self, 'credit')
# Is the feature turned on and do we have all required services
# Also, the ENABLE_PROCTORED_EXAMS feature flag must be set to
# True and the Sequence in question, should have the
# fields set to indicate this is a timed/proctored exam
feature_enabled = (
proctoring_service and
credit_service and
proctoring_service.is_feature_enabled()
)
if feature_enabled:
user_id = self.runtime.user_id
user_role_in_course = 'staff' if self.runtime.user_is_staff else 'student'
course_id = self.runtime.course_id
content_id = self.location
context = {
'display_name': self.display_name,
'default_time_limit_mins': (
self.default_time_limit_minutes if
self.default_time_limit_minutes else 0
),
'is_practice_exam': self.is_practice_exam
}
# inject the user's credit requirements and fulfillments
if credit_service:
credit_state = credit_service.get_credit_state(user_id, course_id)
if credit_state:
context.update({
'credit_state': credit_state
})
# See if the edx-proctoring subsystem wants to present
# a special view to the student rather
# than the actual sequence content
#
# This will return None if there is no
# overridden view to display given the
# current state of the user
view_html = proctoring_service.get_student_view(
user_id=user_id,
course_id=course_id,
content_id=content_id,
context=context,
user_role=user_role_in_course
)
return view_html
def get_icon_class(self):
child_classes = set(child.get_icon_class()
for child in self.get_children())
new_class = 'other'
for c in class_priority:
if c in child_classes:
new_class = c
return new_class
class SequenceDescriptor(SequenceFields, ProctoringFields, MakoModuleDescriptor, XmlDescriptor):
"""
A Sequences Descriptor object
"""
mako_template = 'widgets/sequence-edit.html'
module_class = SequenceModule
show_in_read_only_mode = True
js = {
'coffee': [resource_string(__name__, 'js/src/sequence/edit.coffee')],
}
js_module_name = "SequenceDescriptor"
@classmethod
def definition_from_xml(cls, xml_object, system):
children = []
for child in xml_object:
try:
child_block = system.process_xml(etree.tostring(child, encoding='unicode'))
children.append(child_block.scope_ids.usage_id)
except Exception as e:
log.exception("Unable to load child when parsing Sequence. Continuing...")
if system.error_tracker is not None:
system.error_tracker(u"ERROR: {0}".format(e))
continue
return {}, children
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('sequential')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
@property
def non_editable_metadata_fields(self):
"""
`is_entrance_exam` should not be editable in the Studio settings editor.
"""
non_editable_fields = super(SequenceDescriptor, self).non_editable_metadata_fields
non_editable_fields.append(self.fields['is_entrance_exam'])
return non_editable_fields
def index_dictionary(self):
"""
Return dictionary prepared with module content and type for indexing.
"""
# return key/value fields in a Python dict object
# values may be numeric / string or dict
# default implementation is an empty dict
xblock_body = super(SequenceDescriptor, self).index_dictionary()
html_body = {
"display_name": self.display_name,
}
if "content" in xblock_body:
xblock_body["content"].update(html_body)
else:
xblock_body["content"] = html_body
xblock_body["content_type"] = "Sequence"
return xblock_body
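        # Example of the resulting structure for a sequential named "Week 1" (sketch;
        # fields merged in from the parent index_dictionary are elided):
        #     {"content": {"display_name": "Week 1", ...}, "content_type": "Sequence"}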
| agpl-3.0 | -7,704,655,475,082,857,000 | 34.370588 | 117 | 0.591385 | false |
evolvIQ/iqserialization | PerformanceTest/generate-test-data.py | 1 | 2026 | import json,xmlrpclib
import random
import sys
def random_object(maxdepth=1):
choices = [random_double, random_int, random_bool, random_string, lambda:None]
if maxdepth > 0:
for i in range(1,4):
            # default argument binds i at definition time; a plain closure would
            # capture only the final value of i after the loop finishes
            choices.append(lambda i=i: random_object(maxdepth-i))
return random.choice(choices)()
def random_int():
return random.randint(-1000, 1000)
def random_bool():
return bool(random.randint(0,1))
def random_double():
return random.gauss(0,1e9)
def gen_random_string(prefer_ascii):
s = []
for _ in range(random.randint(1,100)):
if not prefer_ascii and random.randint(0,2) == 0 or prefer_ascii and random.randint(0,200) == 0:
s.append(chr(random.randint(0,255)))
else:
s.append(chr(random.randint(ord('a'),ord('z'))))
return ''.join(s).decode('latin1')
strings = None
string_pool = False
def random_string(prefer_ascii=False):
if string_pool:
        global strings
if strings is None:
strings = [[gen_random_string(False) for _ in range(200)],
[gen_random_string(True) for _ in range(200)]]
return random.choice(strings[prefer_ascii])
else:
return gen_random_string(prefer_ascii)
if __name__ == '__main__':
minlen = 10*1024*1024
if len(sys.argv) > 1:
minlen = int(sys.argv[1])
ct = 0
jsonf = file("test.json","w")
xmlrpcf = file("test.xmlrpc","w")
print >>jsonf, '{',
print >>xmlrpcf, '<params><param><value><struct>',
while ct < minlen:
if ct > 0:
print >>jsonf, ','
else:
print >>jsonf
k,v = (json.dumps(random_string(True)), json.dumps(random_object(10)))
s = '%s : %s' % (k,v)
ct += len(s)
print >>jsonf, s,
xk = xmlrpclib.dumps(({k:v},)).split('<struct>',1)[1].rsplit('</struct>',1)[0].strip()
print >>xmlrpcf, xk
print >>jsonf, '}'
print >>xmlrpcf, '</struct></value></param></params>',
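# Usage sketch: `python generate-test-data.py [min_size]` (default ~10 MB of generated
# pairs) writes the same random key/value data to test.json and test.xmlrpc for the
# serialization performance tests.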
| apache-2.0 | -2,748,689,251,981,267,500 | 30.184615 | 104 | 0.568115 | false |
leeseuljeong/leeseulstack_neutron | neutron/db/migration/alembic_migrations/versions/1fcfc149aca4_agents_unique_by_type_and_host.py | 17 | 1377 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add a unique constraint on (agent_type, host) columns to prevent a race
condition when an agent entry is 'upserted'.
Revision ID: 1fcfc149aca4
Revises: e197124d4b9
Create Date: 2013-11-27 18:35:28.148680
"""
revision = '1fcfc149aca4'
down_revision = 'e197124d4b9'
from alembic import op
from neutron.db import migration
TABLE_NAME = 'agents'
UC_NAME = 'uniq_agents0agent_type0host'
def upgrade():
if not migration.schema_has_table(TABLE_NAME):
# Assume that, in the database we are migrating from, the
# configured plugin did not create the agents table.
return
op.create_unique_constraint(
name=UC_NAME,
source=TABLE_NAME,
local_cols=['agent_type', 'host']
)
def downgrade():
pass
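    # Left as a no-op; a symmetric downgrade would drop the constraint,
    # e.g. (sketch): op.drop_constraint(UC_NAME, TABLE_NAME, type_='unique')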
| apache-2.0 | 374,444,057,740,654,400 | 25.480769 | 78 | 0.705156 | false |
NMGRL/pychron | pychron/applications/pydiode.py | 2 | 1412 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pyface.tasks.task_window_layout import TaskWindowLayout
from pychron.applications.pychron_application import PychronApplication
# ============= standard library imports ========================
# ============= local library imports ==========================
class PyDiode(PychronApplication):
id = 'pychron.diode.application'
name = 'pyDiode'
shortname = 'diode'
default_layout = [
TaskWindowLayout('tasks.hardware'),
TaskWindowLayout('pychron.fusions.diode')]
# ============= EOF =============================================
| apache-2.0 | 4,916,240,546,597,766,000 | 39.342857 | 81 | 0.572946 | false |
klmitch/appathy | appathy/types.py | 1 | 5024 | # Copyright (C) 2012 by Kevin L. Mitchell <[email protected]>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import functools
import inspect
class Translators(object):
"""
Represent a set of translators. A translator is a serializer or
deserializer, corresponding to a particular return type.
"""
def __init__(self, method, attr_name):
"""
Initialize a set of translators. The translators for a given
method are derived from the class of the method, updated with
translators set on the method itself. The `attr_name`
parameter specifies the attribute containing the translation
table.
"""
# Build up the translators
self.translators = getattr(method.im_self, attr_name, {}).copy()
self.translators.update(getattr(method, attr_name, {}))
def __call__(self, content_type):
"""
Select the translator corresponding to the given content type.
"""
# Get the type name
type_name = media_types[content_type]
# Select the translator to use
xlator = self.translators[type_name]
# If it's a class, instantiate it
if inspect.isclass(xlator):
return xlator(type_name, content_type)
# It's a function; partialize and return it
return functools.partial(xlator, type_name, content_type)
def get_types(self):
"""
Retrieve a set of all recognized content types for this
translator object.
"""
# Convert translators into a set of content types
content_types = set()
for name in self.translators:
content_types |= type_names[name]
return content_types
def _translators(attr, kwargs):
"""
Decorator which associates a set of translators (serializers or
deserializers) with a given method. The `attr` parameter
identifies which attribute is being updated.
"""
# Add translators to a function or class
def decorator(func):
# Make sure we have the attribute
try:
xlators = getattr(func, attr)
except AttributeError:
xlators = {}
setattr(func, attr, xlators)
xlators.update(kwargs)
return func
return decorator
def serializers(**kwargs):
"""
Decorator which binds a set of serializers with a method. The key
of each keyword argument is interpreted as a short name for the
content type (bind short names to content types using
register_types()), and the value is a callable.
If the callable is a class, it will be instantiated with two
arguments: the short type name and the content type. It must
define a __call__() method taking one argument: the object to
serialize. The class may also define an optional attach() method,
which allows serializers for extensions to be attached to the
primary serializer.
If the callable is a function, it will be called with three
arguments: the short type name, the content type, and the object
to serialize.
"""
return _translators('_wsgi_serializers', kwargs)
def deserializers(**kwargs):
"""
Decorator which binds a set of deserializers with a method. The
key of each keyword argument is interpreted as a short name for
the content type (bind short names to content types using
register_types()), and the value is a callable.
If the callable is a class, it will be instantiated with two
arguments: the short type name and the content type. It must
define a __call__() method taking one argument: the string to
deserialize. The class may also define an optional attach()
method, which allows deserializers for extensions to be attached
to the primary deserializer.
If the callable is a function, it will be called with three
arguments: the short type name, the content type, and the string
to deserialize.
"""
return _translators('_wsgi_deserializers', kwargs)
media_types = {}
type_names = {}
def register_types(name, *types):
"""
Register a short name for one or more content types.
"""
type_names.setdefault(name, set())
for t in types:
# Redirecting the type
if t in media_types:
type_names[media_types[t]].discard(t)
# Save the mapping
media_types[t] = name
type_names[name].add(t)
| gpl-3.0 | 806,268,398,094,400,000 | 31.623377 | 72 | 0.669586 | false |
MisterPup/OpenStack-Neat-Ceilometer | tests/locals/overload/mhod/test_multisize_estimation.py | 5 | 35207 | # Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
from collections import deque
from copy import deepcopy
import re
import neat.locals.overload.mhod.multisize_estimation as m
import logging
logging.disable(logging.CRITICAL)
def c(data):
return deepcopy(data)
class Multisize(TestCase):
def test_mean(self):
self.assertEqual(m.mean([], 100), 0.0)
self.assertEqual(m.mean([0], 100), 0.0)
self.assertEqual(m.mean([0, 0], 100), 0.0)
self.assertEqual(m.mean([1, 1], 100), 0.02)
self.assertEqual(m.mean([0, 1], 100), 0.01)
self.assertEqual(m.mean([1, 2, 3, 4, 5], 100), 0.15)
def test_variance(self):
self.assertEqual(m.variance([], 100), 0.0)
self.assertEqual(m.variance([0], 100), 0.0)
self.assertEqual(m.variance([0, 0], 100), 0.0)
self.assertAlmostEqual(m.variance([1, 1], 100), 0.0194020202)
self.assertAlmostEqual(m.variance([0, 1], 100), 0.0099010101)
self.assertAlmostEqual(m.variance([1, 2, 3, 4, 5], 100), 0.511237373)
self.assertAlmostEqual(m.variance([0, 0, 0, 1], 100), 0.0099030303)
def test_acceptable_variance(self):
self.assertAlmostEqual(m.acceptable_variance(0.2, 5), 0.032, 3)
self.assertAlmostEqual(m.acceptable_variance(0.6, 15), 0.016, 3)
def test_estimate_probability(self):
self.assertEqual(
m.estimate_probability([0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 100, 0),
0.08)
self.assertEqual(
m.estimate_probability([0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 100, 1),
0.02)
self.assertEqual(
m.estimate_probability([1, 1, 0, 0, 1, 1, 1, 1, 1, 1], 200, 0),
0.01)
self.assertEqual(
m.estimate_probability([1, 1, 0, 0, 1, 1, 1, 1, 1, 1], 200, 1),
0.04)
def test_update_request_windows(self):
max_window_size = 4
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0, 0]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 0, 1]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 1, 0])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1, 1])])
max_window_size = 2
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 1]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 0])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1])])
max_window_size = 4
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size),
deque([2, 2], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 0, 1]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 2),
[deque([0, 0, 2]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 1, 0]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 2),
[deque([0, 0]),
deque([1, 1, 2]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 2, 0),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2, 0])])
self.assertEqual(m.update_request_windows(c(windows), 2, 1),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2, 1])])
self.assertEqual(m.update_request_windows(c(windows), 2, 2),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2, 2])])
max_window_size = 2
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size),
deque([2, 2], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 1]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 2),
[deque([0, 2]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 0]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 2),
[deque([0, 0]),
deque([1, 2]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 2, 0),
[deque([0, 0]),
deque([1, 1]),
deque([2, 0])])
self.assertEqual(m.update_request_windows(c(windows), 2, 1),
[deque([0, 0]),
deque([1, 1]),
deque([2, 1])])
self.assertEqual(m.update_request_windows(c(windows), 2, 2),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
def test_update_estimate_windows(self):
req_win = [deque([1, 0, 0, 0]),
deque([1, 0, 1, 0])]
est_win = [[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}],
[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}]]
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 0),
[[{2: deque([0, 1.0]),
4: deque([0, 0, 0.75])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.25])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 1),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])}]])
req_win = [deque([1, 0, 2, 0]),
deque([1, 0, 1, 0]),
deque([2, 2, 1, 0])]
est_win = [[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}],
[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}],
[{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)},
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}]]
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 0),
[[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 1),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 2),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.5])}]])
def test_update_variances(self):
est_win = [[{2: deque([0, 0.5], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1.0, 0.5], 2),
4: deque([0, 1, 1, 1], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.25, 0.5, 0.5], 4)},
{2: deque([0.5, 0.75], 2),
4: deque([0.75, 0.75, 0.5, 0.5], 4)}]]
variances = [[{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]]
self.assertEqual(m.update_variances(c(variances), c(est_win), 0),
[[{2: 0.125,
4: 0.25},
{2: 0.125,
4: 0.25}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_variances(c(variances), c(est_win), 1),
[[{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.03125,
4: 0.020833333333333332},
{2: 0.03125,
4: 0.020833333333333332}]])
self.assertEqual(m.update_variances(
m.update_variances(c(variances), c(est_win), 0), c(est_win), 0),
[[{2: 0.125,
4: 0.25},
{2: 0.125,
4: 0.25}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]])
est_win = [[{2: deque([0, 0], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1, 1], 2),
4: deque([0, 0, 1, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.05, 0.5, 0.25], 4)},
{2: deque([0.25, 0.5], 2),
4: deque([0.4, 0.55, 0.25, 0.5], 4)},
{2: deque([0.25, 0.25], 2),
4: deque([0.35, 0.4, 0.25, 0.25], 4)}],
[{2: deque([1, 0], 2),
4: deque([1, 0, 1, 0], 4)},
{2: deque([0, 1], 2),
4: deque([0, 0, 0, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}]]
variances = [[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]]
self.assertEqual(m.update_variances(c(variances), c(est_win), 0),
[[{2: 0.0,
4: 0.25},
{2: 0.0,
4: 0.3333333333333333},
{2: 0.0,
4: 0.25}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_variances(c(variances), c(est_win), 1),
[[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.03125,
4: 0.03395833333333333},
{2: 0.03125,
4: 0.0175},
{2: 0.0,
4: 0.005625000000000001}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_variances(c(variances), c(est_win), 2),
[[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.5,
4: 0.3333333333333333},
{2: 0.5,
4: 0.25},
{2: 0.0,
4: 0.25}]])
def test_update_acceptable_variances(self):
est_win = [[{2: deque([0, 0.5], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1.0, 0.5], 2),
4: deque([0, 1, 1, 1], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.25, 0.5, 0.5], 4)},
{2: deque([0.5, 0.75], 2),
4: deque([0.75, 0.75, 0.5, 0.5], 4)}]]
acc_variances = [[{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]]
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 0),
[[{2: 0.125,
4: 0.0},
{2: 0.125,
4: 0.0}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 1),
[[{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.09375,
4: 0.0625},
{2: 0.09375,
4: 0.0625}]])
self.assertEqual(m.update_acceptable_variances(
m.update_acceptable_variances(
c(acc_variances), c(est_win), 0), c(est_win), 0),
[[{2: 0.125,
4: 0.0},
{2: 0.125,
4: 0.0}],
[{2: 0,
4: 0},
{2: 0,
4: 0}]])
est_win = [[{2: deque([0, 0], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1, 1], 2),
4: deque([0, 0, 1, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.05, 0.5, 0.25], 4)},
{2: deque([0.25, 0.5], 2),
4: deque([0.4, 0.55, 0.25, 0.5], 4)},
{2: deque([0.25, 0.25], 2),
4: deque([0.35, 0.4, 0.25, 0.25], 4)}],
[{2: deque([1, 0], 2),
4: deque([1, 0, 1, 0], 4)},
{2: deque([0, 1], 2),
4: deque([0, 0, 0, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}]]
acc_variances = [[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]]
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 0),
[[{2: 0.0,
4: 0.0},
{2: 0.0,
4: 0.0},
{2: 0.0,
4: 0.0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 1),
[[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.09375,
4: 0.046875},
{2: 0.125,
4: 0.0625},
{2: 0.09375,
4: 0.046875}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}]])
self.assertEqual(m.update_acceptable_variances(c(acc_variances),
c(est_win), 2),
[[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0},
{2: 0,
4: 0}],
[{2: 0.0,
4: 0.0},
{2: 0.0,
4: 0.0},
{2: 0.0,
4: 0.0}]])
def test_select_window(self):
variances = [[{2: 0.2,
4: 0.9},
{2: 0.2,
4: 0.6}],
[{2: 0.2,
4: 0},
{2: 0.2,
4: 0.8}]]
acc_variances = [[{2: 0.1,
4: 0.5},
{2: 0.4,
4: 0.5}],
[{2: 0.4,
4: 0.5},
{2: 0.1,
4: 0.5}]]
window_sizes = [2, 4]
self.assertEqual(
m.select_window(variances, acc_variances, window_sizes),
[[2, 2],
[4, 2]])
variances = [[{2: 0,
4: 0.9},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0.8}]]
acc_variances = [[{2: 0.5,
4: 0.5},
{2: 0.6,
4: 0.5}],
[{2: 0.7,
4: 0.5},
{2: 0.4,
4: 0.5}]]
window_sizes = [2, 4]
self.assertEqual(
m.select_window(variances, acc_variances, window_sizes),
[[2, 4],
[4, 2]])
variances = [[{2: 0,
4: 0.9},
{2: 0,
4: 0},
{2: 0,
4: 1.0}],
[{2: 0,
4: 0},
{2: 0,
4: 0.8},
{2: 0,
4: 0}],
[{2: 0,
4: 0},
{2: 0,
4: 0.8},
{2: 0.5,
4: 0}]]
acc_variances = [[{2: 0.5,
4: 0.9},
{2: 0.6,
4: 0.9},
{2: 0.6,
4: 0.9}],
[{2: 0.7,
4: 0.9},
{2: 0.4,
4: 0.9},
{2: 0.4,
4: 0.9}],
[{2: 0.7,
4: 0.9},
{2: 0.4,
4: 0.5},
{2: 0.4,
4: 0.9}]]
window_sizes = [2, 4]
self.assertEqual(
m.select_window(variances, acc_variances, window_sizes),
[[4, 4, 2],
[4, 4, 4],
[4, 2, 2]])
def test_select_best_estimates(self):
est_win = [[{2: deque([0, 0], 2),
4: deque([1, 0, 0, 0], 4)},
{2: deque([1, 1], 2),
4: deque([0, 0, 1, 1], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}],
[{2: deque([0.5, 0.25], 2),
4: deque([0.25, 0.05, 0.5, 0.25], 4)},
{2: deque([0.25, 0.5], 2),
4: deque([0.4, 0.55, 0.25, 0.6], 4)},
{2: deque([0.25, 0.25], 2),
4: deque([0.35, 0.4, 0.25, 0.15], 4)}],
[{2: deque([1, 0], 2),
4: deque([1, 0, 1, 0], 4)},
{2: deque([0, 1], 2),
4: deque([0, 0, 0, 0.2], 4)},
{2: deque([0, 0], 2),
4: deque([0, 1, 0, 0], 4)}]]
selected_windows1 = [[2, 4, 2],
[2, 2, 4],
[4, 2, 2]]
selected_windows2 = [[4, 4, 4],
[2, 2, 2],
[2, 4, 2]]
self.assertEqual(
m.select_best_estimates(c(est_win), selected_windows1),
[[0, 1, 0],
[0.25, 0.5, 0.15],
[0, 1, 0]])
self.assertEqual(
m.select_best_estimates(c(est_win), selected_windows2),
[[0, 1, 0],
[0.25, 0.5, 0.25],
[0, 0.2, 0]])
est_win = [[{2: deque(),
4: deque()},
{2: deque(),
4: deque()}],
[{2: deque(),
4: deque()},
{2: deque(),
4: deque()}]]
self.assertEqual(
m.select_best_estimates(c(est_win), [[2, 4], [4, 2]]),
[[0.0, 0.0],
[0.0, 0.0]])
self.assertEqual(
m.select_best_estimates(c(est_win), [[2, 2], [4, 4]]),
[[0.0, 0.0],
[0.0, 0.0]])
def test_init_request_windows(self):
structure = m.init_request_windows(1, 4)
self.assertEqual(structure, [deque()])
self.assertEqual(deque_maxlen(structure[0]), 4)
structure = m.init_request_windows(2, 4)
self.assertEqual(structure, [deque(),
deque()])
self.assertEqual(deque_maxlen(structure[0]), 4)
self.assertEqual(deque_maxlen(structure[1]), 4)
structure = m.init_request_windows(3, 4)
self.assertEqual(structure, [deque(),
deque(),
deque()])
self.assertEqual(deque_maxlen(structure[0]), 4)
self.assertEqual(deque_maxlen(structure[1]), 4)
self.assertEqual(deque_maxlen(structure[2]), 4)
def test_init_variances(self):
self.assertEqual(m.init_variances([2, 4], 1), [[{2: 1.0,
4: 1.0}]])
self.assertEqual(m.init_variances([2, 4], 2), [[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}],
[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}]])
self.assertEqual(m.init_variances([2, 4], 3), [[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}],
[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}],
[{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0},
{2: 1.0,
4: 1.0}]])
def test_init_3_level_structure(self):
structure = m.init_deque_structure([2, 4], 1)
self.assertEqual(structure, [[{2: deque(),
4: deque()}]])
self.assertEqual(deque_maxlen(structure[0][0][2]), 2)
self.assertEqual(deque_maxlen(structure[0][0][4]), 4)
structure = m.init_deque_structure([2, 4], 2)
self.assertEqual(structure, [[{2: deque(),
4: deque()},
{2: deque(),
4: deque()}],
[{2: deque(),
4: deque()},
{2: deque(),
4: deque()}]])
self.assertEqual(deque_maxlen(structure[0][0][2]), 2)
self.assertEqual(deque_maxlen(structure[0][0][4]), 4)
self.assertEqual(deque_maxlen(structure[0][1][2]), 2)
self.assertEqual(deque_maxlen(structure[0][1][4]), 4)
self.assertEqual(deque_maxlen(structure[1][0][2]), 2)
self.assertEqual(deque_maxlen(structure[1][0][4]), 4)
self.assertEqual(deque_maxlen(structure[1][1][2]), 2)
self.assertEqual(deque_maxlen(structure[1][1][4]), 4)
structure = m.init_deque_structure([2, 4], 3)
self.assertEqual(structure, [[{2: deque(),
4: deque()},
{2: deque(),
4: deque()},
{2: deque(),
4: deque()}],
[{2: deque(),
4: deque()},
{2: deque(),
4: deque()},
{2: deque(),
4: deque()}],
[{2: deque(),
4: deque()},
{2: deque(),
4: deque()},
{2: deque(),
4: deque()}]])
self.assertEqual(deque_maxlen(structure[0][0][2]), 2)
self.assertEqual(deque_maxlen(structure[0][0][4]), 4)
self.assertEqual(deque_maxlen(structure[0][1][2]), 2)
self.assertEqual(deque_maxlen(structure[0][1][4]), 4)
self.assertEqual(deque_maxlen(structure[0][2][2]), 2)
self.assertEqual(deque_maxlen(structure[0][2][4]), 4)
self.assertEqual(deque_maxlen(structure[1][0][2]), 2)
self.assertEqual(deque_maxlen(structure[1][0][4]), 4)
self.assertEqual(deque_maxlen(structure[1][1][2]), 2)
self.assertEqual(deque_maxlen(structure[1][1][4]), 4)
self.assertEqual(deque_maxlen(structure[1][2][2]), 2)
self.assertEqual(deque_maxlen(structure[1][2][4]), 4)
self.assertEqual(deque_maxlen(structure[2][0][2]), 2)
self.assertEqual(deque_maxlen(structure[2][0][4]), 4)
self.assertEqual(deque_maxlen(structure[2][1][2]), 2)
self.assertEqual(deque_maxlen(structure[2][1][4]), 4)
self.assertEqual(deque_maxlen(structure[2][2][2]), 2)
self.assertEqual(deque_maxlen(structure[2][2][4]), 4)
def test_init_selected_window_sizes(self):
self.assertEqual(
m.init_selected_window_sizes([2, 4], 1), [[2]])
self.assertEqual(
m.init_selected_window_sizes([2, 4], 2), [[2, 2],
[2, 2]])
self.assertEqual(
m.init_selected_window_sizes([2, 4], 3), [[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
def deque_maxlen(coll):
return int(re.sub("\)$", "", re.sub(".*=", "", coll.__repr__())))
| apache-2.0 | 8,894,116,129,880,409,000 | 39.607843 | 77 | 0.292158 | false |
arakcheev/python-data-plotter | parkerWind/old/wind.py | 1 | 1123 | import math
import params
MASS = params.STAR_MASS
accuracy = params.accuracy
# Radius in cm
def sonic_radius(t):
return 2.0e11 * 2e6 / t * MASS
# Sound speed in km/s
def sound_speed(t):
return 180 * math.sqrt(t / 2.0e6)
def right_part(r, t):
rs = sonic_radius(t)
return 4.0 * math.log(1.0 * r / rs) + 4.0 * rs / r - 3
def left_part(u, t):
a0 = sound_speed(t)
a0srq = a0 * a0
uu = 1.0 * u * u / a0srq
return uu - math.log(uu)
def get(r, t):
rp = right_part(r, t)
rs = sonic_radius(t)
cs = sound_speed(t)
if r < rs:
left = 0.1
right = cs
else:
left = cs
right = 1000
def eq(u):
return left_part(u, t) - rp
middle = (left + right) / 2
err = eq(middle)
index = 0
while math.fabs(err) > accuracy:
if eq(left) * eq(middle) < 0:
right = middle
else:
left = middle
middle = 1.0 * (left + right) / 2.0
err = eq(middle)
index += 1
if index == 1000:
break
return middle
def getsgs(r, t):
return get(r, t) * 1e5
| mit | -659,596,711,728,893,400 | 16.276923 | 58 | 0.508459 | false |
congma/pypar | examples/MPIMapReducer.py | 2 | 6166 | """
MapReduce-over-MPI sample
Run as (as exsample)
mpirun -np 2 python MPIMapReducer.py
(perhaps try number of processors more than 2)
GPC JAN 2013
"""
import sys
try:
import numpy
except:
raise Exception, 'Module numpy must be present to run pypar'
try:
import pypar
except:
raise Exception, 'Module pypar must be present to run parallel'
import logging
LOGFMT = 'pid: %(process)d - %(asctime)s - %(levelname)s:%(message)s'
logging.basicConfig(format=LOGFMT, level=logging.INFO)
class MPIMapReducer():
def __init__(self, aWorkList):
self.WORKTAG = 1
self.DIETAG = 2
self.MPI_myid = pypar.rank()
self.MPI_numproc = pypar.size()
self.MPI_node = pypar.get_processor_name()
self.works = aWorkList
self.numWorks = len(self.works)
self.reduceFunction = None
self.mapFunction = None
self.result = None
if self.MPI_numproc < 2:
pypar.finalize()
if self.MPI_myid == 0:
raise Exception, 'ERROR: Number of processors must be greater than 2.'
def master(self):
self.numCompleted = 0
self.mapList = list()
logging.info('[MASTER]: started processor %d of %d on node %s: number of works: %d'%(self.MPI_myid, self.MPI_numproc, self.MPI_node, self.numWorks))
# start slaves distributing the first work slot
rounder = 0
if self.MPI_numproc <= self.numWorks:
rounder = 1
for i in range(min(self.MPI_numproc, self.numWorks)-rounder):
work = self.works[i]
pypar.send(work, destination=i+1, tag=self.WORKTAG)
logging.debug('[MASTER]: sent work "%s" to node %d' %(work, i+1))
# dispatch the remaining work slots on dynamic load-balancing policy
# the quicker to do the job, the more jobs it takes
for work in self.works[self.MPI_numproc-1:]:
result, status = pypar.receive(source=pypar.any_source, tag=self.WORKTAG,
return_status=True)
logging.debug('[MASTER]: received result "%s" from node %d'%(result, status.source))
self.mapList.append(result)
self.numCompleted += 1
logging.debug('[MASTER]: done : %d' %self.numCompleted)
pypar.send(work, destination=status.source, tag=self.WORKTAG)
logging.debug('[MASTER]: sent work "%s" to node %d' %(work, status.source))
# all works have been dispatched out
logging.debug('[MASTER]: toDo : %d' %self.numWorks)
logging.debug('[MASTER]: done : %d' %self.numCompleted)
        # still need to collect the remaining completions
while (self.numCompleted < self.numWorks):
result, status = pypar.receive(source=pypar.any_source, tag=self.WORKTAG,
return_status=True)
logging.debug('[MASTER]: received (final) result "%s" from node %d'%(result, status.source))
self.mapList.append(result)
self.numCompleted += 1
logging.debug('[MASTER]: %d completed' %self.numCompleted)
logging.debug('[MASTER]: about to terminate slaves')
# Tell slaves to stop working
for i in range(1, self.MPI_numproc):
pypar.send('#', destination=i, tag=self.DIETAG)
logging.debug('[MASTER]: sent termination signal to node %d' %(i, ))
# call the reduce function
logging.info('[MASTER]: about to run reduce')
res = self.reduceFunction(self.mapList)
return res
def slave(self):
logging.debug('[SLAVE %d]: started processor %d of %d on node %s'%(self.MPI_myid, self.MPI_myid, self.MPI_numproc, self.MPI_node))
while True:
inputMsg, status = pypar.receive(source=0,
tag=pypar.any_tag,
return_status=True)
logging.debug('[SLAVE %d]: received work "%s" with tag %d from node %d' %(self.MPI_myid, inputMsg, status.tag, status.source))
if (status.tag == self.DIETAG):
logging.debug('[SLAVE %d]: received termination from node %d'%(self.MPI_myid, 0))
return
else:
logging.debug('[SLAVE %d]: received work "%s" to map' %(self.MPI_myid, inputMsg))
resultMsg = self.mapFunction(inputMsg)
pypar.send(resultMsg, destination=0, tag=self.WORKTAG)
logging.debug('[SLAVE %d]: sent result "%s" to node %d'%(self.MPI_myid, resultMsg, 0))
def setReduce(self, aFunction):
self.reduceFunction = aFunction
def setMap(self, aFunction):
self.mapFunction = aFunction
def runMapReduce(self):
if self.MPI_myid == 0:
self.result = self.master()
else:
self.slave()
pypar.finalize()
logging.debug('[PROCESS %d]: MPI environment finalized.'%(self.MPI_myid, ))
return
def getResult(self):
if self.MPI_myid == 0:
return self.result
else:
logging.debug('[SLAVE %d]: ending.'%(self.MPI_myid, ))
sys.exit(0)
if __name__ == '__main__':
def mapFn(anInput):
return 'X' + anInput
def reduceFn(aList):
return ''.join(aList)
workList = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']*10
logging.debug('starting mpimapreducer with %d works.'%(len(workList),))
m2r = MPIMapReducer(workList)
m2r.setMap(mapFn)
m2r.setReduce(reduceFn)
# here's the beef
m2r.runMapReduce()
lenmr = len(m2r.getResult())
print "MAPREDUCE :", lenmr
logging.debug('ended mpimapreduce')
logging.info('starting sequential evaluation')
lenseq = len(''.join('X' + item for item in workList))
print "SEQUENTIAL:", lenseq
logging.info('ending sequential evaluation')
# the result must be the same
assert lenmr == lenseq
| gpl-3.0 | 8,002,693,074,652,983,000 | 33.066298 | 156 | 0.570224 | false |
gojira/tensorflow | tensorflow/contrib/opt/python/training/adamax_test.py | 13 | 14685 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AdaMax."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import adamax
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adamax_update_numpy(param,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
m_t = beta1 * m + (1 - beta1) * g_t
v_t = np.maximum(beta2 * v, np.abs(g_t))
param_t = param - (alpha / (1 - beta1**t)) * (m_t / (v_t + epsilon))
return param_t, m_t, v_t
def adamax_sparse_update_numpy(param,
indices,
g_t,
t,
m,
v,
alpha=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8):
m_t, v_t, param_t = np.copy(m), np.copy(v), np.copy(param)
m_t_slice = beta1 * m[indices] + (1 - beta1) * g_t
v_t_slice = np.maximum(beta2 * v[indices], np.abs(g_t))
param_t_slice = param[indices] - ((alpha / (1 - beta1**t)) *
(m_t_slice / (v_t_slice + epsilon)))
m_t[indices] = m_t_slice
v_t[indices] = v_t_slice
param_t[indices] = param_t_slice
return param_t, m_t, v_t
class AdaMaxOptimizerTest(test.TestCase):
def doTestSparse(self, use_resource=False):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
zero_slots = lambda: np.zeros((3), dtype=dtype.as_numpy_dtype)
m0, v0, m1, v1 = zero_slots(), zero_slots(), zero_slots(), zero_slots()
var0_np = np.array([1.0, 2.0, 3.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([4.0, 5.0, 6.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([2, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = adamax.AdaMaxOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0, 3.0], var0.eval())
self.assertAllClose([4.0, 5.0, 6.0], var1.eval())
beta1_power = opt._get_beta_accumulators()
# Run 3 steps of AdaMax
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
update.run()
var0_np, m0, v0 = adamax_sparse_update_numpy(
var0_np, grads0_np_indices, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_sparse_update_numpy(
var1_np, grads1_np_indices, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSparse(self):
self.doTestSparse(use_resource=False)
def testResourceSparse(self):
self.doTestSparse(use_resource=True)
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.test_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = adamax.AdaMaxOptimizer(3.0)
minimize_op = optimizer.minimize(gathered_sum)
variables.global_variables_initializer().run()
minimize_op.run()
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adamax.AdaMaxOptimizer().apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adamax.AdaMaxOptimizer().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
def doTestBasic(self, use_resource=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
with self.test_session(graph=ops.Graph()):
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
var0_np, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
var1_np, name="var1_%d" % i)
else:
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.AdaMaxOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
opt_variables = opt.variables()
beta1_power = opt._get_beta_accumulators()
self.assertTrue(beta1_power is not None)
self.assertIn(beta1_power, opt_variables)
if not context.executing_eagerly():
with ops.Graph().as_default():
# Shouldn't return non-slot variables from other graphs.
self.assertEqual(0, len(opt.variables()))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power = opt._get_beta_accumulators()
# Run 3 steps of AdaMax
for t in range(1, 4):
if not context.executing_eagerly():
self.evaluate(update)
elif t > 1:
opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta1_power))
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0),
rtol=1e-2)
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1),
rtol=1e-2)
if use_resource:
self.assertEqual("var0_%d/AdaMax:0" % (i,),
opt.get_slot(var=var0, name="m").name)
def testBasic(self):
with self.test_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.AdaMaxOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power = opt._get_beta_accumulators()
# Run 3 steps of AdaMax
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
update.run()
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
opt = adamax.AdaMaxOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
beta1_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of intertwined AdaMax1 and AdaMax2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
if t % 2 == 0:
update1.run()
else:
update2.run()
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testTwoSessions(self):
optimizer = adamax.AdaMaxOptimizer()
g = ops.Graph()
with g.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
optimizer.apply_gradients([(grads0, var0)])
gg = ops.Graph()
with gg.as_default():
with session.Session():
var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
grads0 = constant_op.constant(np.array([0.1, 0.1]))
# If the optimizer saves any state not keyed by graph the following line
# fails.
optimizer.apply_gradients([(grads0, var0)])
def testSlotsUniqueEager(self):
with context.eager_mode():
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
opt = adamax.AdaMaxOptimizer(1.)
opt.minimize(lambda: v1 + v2)
# There should be two non-slot variables, and two unique slot variables
# for v1 and v2 respectively.
self.assertEqual(5, len(set(opt.variables())))
if __name__ == "__main__":
test.main()
| apache-2.0 | -4,885,842,497,378,936,000 | 40.957143 | 80 | 0.598434 | false |
nekulin/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/idlelib/CallTips.py | 50 | 7587 | """CallTips.py - An IDLE Extension to Jog Your Memory
Call Tips are floating windows which display function, class, and method
parameter and docstring information when you type an opening parenthesis, and
which disappear when you type a closing parenthesis.
"""
import re
import sys
import types
import CallTipWindow
from HyperParser import HyperParser
import __main__
class CallTips:
menudefs = [
('edit', [
("Show call tip", "<<force-open-calltip>>"),
])
]
def __init__(self, editwin=None):
if editwin is None: # subprocess and test
self.editwin = None
return
self.editwin = editwin
self.text = editwin.text
self.calltip = None
self._make_calltip_window = self._make_tk_calltip_window
def close(self):
self._make_calltip_window = None
def _make_tk_calltip_window(self):
# See __init__ for usage
return CallTipWindow.CallTip(self.text)
def _remove_calltip_window(self, event=None):
if self.calltip:
self.calltip.hidetip()
self.calltip = None
def force_open_calltip_event(self, event):
"""Happens when the user really wants to open a CallTip, even if a
function call is needed.
"""
self.open_calltip(True)
def try_open_calltip_event(self, event):
"""Happens when it would be nice to open a CallTip, but not really
        necessary, for example after an opening bracket, so function calls
won't be made.
"""
self.open_calltip(False)
def refresh_calltip_event(self, event):
"""If there is already a calltip window, check if it is still needed,
and if so, reload it.
"""
if self.calltip and self.calltip.is_active():
self.open_calltip(False)
def open_calltip(self, evalfuncs):
self._remove_calltip_window()
hp = HyperParser(self.editwin, "insert")
sur_paren = hp.get_surrounding_brackets('(')
if not sur_paren:
return
hp.set_index(sur_paren[0])
name = hp.get_expression()
if not name or (not evalfuncs and name.find('(') != -1):
return
arg_text = self.fetch_tip(name)
if not arg_text:
return
self.calltip = self._make_calltip_window()
self.calltip.showtip(arg_text, sur_paren[0], sur_paren[1])
def fetch_tip(self, name):
"""Return the argument list and docstring of a function or class
If there is a Python subprocess, get the calltip there. Otherwise,
either fetch_tip() is running in the subprocess itself or it was called
in an IDLE EditorWindow before any script had been run.
The subprocess environment is that of the most recently run script. If
two unrelated modules are being edited some calltips in the current
module may be inoperative if the module was not the last to run.
To find methods, fetch_tip must be fed a fully qualified name.
"""
try:
rpcclt = self.editwin.flist.pyshell.interp.rpcclt
except:
rpcclt = None
if rpcclt:
return rpcclt.remotecall("exec", "get_the_calltip",
(name,), {})
else:
entity = self.get_entity(name)
return get_arg_text(entity)
def get_entity(self, name):
"Lookup name in a namespace spanning sys.modules and __main.dict__"
if name:
namespace = sys.modules.copy()
namespace.update(__main__.__dict__)
try:
return eval(name, namespace)
except (NameError, AttributeError):
return None
def _find_constructor(class_ob):
# Given a class object, return a function object used for the
# constructor (ie, __init__() ) or None if we can't find one.
try:
return class_ob.__init__.im_func
except AttributeError:
for base in class_ob.__bases__:
rc = _find_constructor(base)
if rc is not None: return rc
return None
def get_arg_text(ob):
"""Get a string describing the arguments for the given object"""
arg_text = ""
if ob is not None:
arg_offset = 0
if type(ob) in (types.ClassType, types.TypeType):
# Look for the highest __init__ in the class chain.
fob = _find_constructor(ob)
if fob is None:
fob = lambda: None
else:
arg_offset = 1
elif type(ob)==types.MethodType:
# bit of a hack for methods - turn it into a function
# but we drop the "self" param.
fob = ob.im_func
arg_offset = 1
else:
fob = ob
# Try to build one for Python defined functions
if type(fob) in [types.FunctionType, types.LambdaType]:
argcount = fob.func_code.co_argcount
real_args = fob.func_code.co_varnames[arg_offset:argcount]
defaults = fob.func_defaults or []
defaults = list(map(lambda name: "=%s" % repr(name), defaults))
defaults = [""] * (len(real_args) - len(defaults)) + defaults
items = map(lambda arg, dflt: arg + dflt, real_args, defaults)
if fob.func_code.co_flags & 0x4:
items.append("...")
if fob.func_code.co_flags & 0x8:
items.append("***")
arg_text = ", ".join(items)
arg_text = "(%s)" % re.sub("\.\d+", "<tuple>", arg_text)
# See if we can use the docstring
doc = getattr(ob, "__doc__", "")
if doc:
doc = doc.lstrip()
pos = doc.find("\n")
if pos < 0 or pos > 70:
pos = 70
if arg_text:
arg_text += "\n"
arg_text += doc[:pos]
return arg_text
#################################################
#
# Test code
#
if __name__=='__main__':
def t1(): "()"
def t2(a, b=None): "(a, b=None)"
def t3(a, *args): "(a, ...)"
def t4(*args): "(...)"
def t5(a, *args): "(a, ...)"
def t6(a, b=None, *args, **kw): "(a, b=None, ..., ***)"
def t7((a, b), c, (d, e)): "(<tuple>, c, <tuple>)"
class TC(object):
"(ai=None, ...)"
def __init__(self, ai=None, *b): "(ai=None, ...)"
def t1(self): "()"
def t2(self, ai, b=None): "(ai, b=None)"
def t3(self, ai, *args): "(ai, ...)"
def t4(self, *args): "(...)"
def t5(self, ai, *args): "(ai, ...)"
def t6(self, ai, b=None, *args, **kw): "(ai, b=None, ..., ***)"
def t7(self, (ai, b), c, (d, e)): "(<tuple>, c, <tuple>)"
def test(tests):
ct = CallTips()
failed=[]
for t in tests:
expected = t.__doc__ + "\n" + t.__doc__
name = t.__name__
# exercise fetch_tip(), not just get_arg_text()
try:
qualified_name = "%s.%s" % (t.im_class.__name__, name)
except AttributeError:
qualified_name = name
arg_text = ct.fetch_tip(qualified_name)
if arg_text != expected:
failed.append(t)
fmt = "%s - expected %s, but got %s"
print fmt % (t.__name__, expected, get_arg_text(t))
print "%d of %d tests failed" % (len(failed), len(tests))
tc = TC()
tests = (t1, t2, t3, t4, t5, t6, t7,
TC, tc.t1, tc.t2, tc.t3, tc.t4, tc.t5, tc.t6, tc.t7)
test(tests)
| apache-2.0 | -1,792,947,935,949,947,100 | 33.330317 | 79 | 0.534599 | false |
Hasimir/brython | scripts/make_VFS.py | 1 | 2495 | # -*- coding: utf-8 -*-
import json
import os
import re
import python_minifier
def process(filename, exclude_dirs=['test','site-packages']):
"""Process a VFS filename for Brython."""
print("Generating {}".format(filename))
nb = 0
nb_err = 0
main_root = os.path.dirname(filename)
VFS = {}
for stdlib_dir in ("libs", "Lib"):
for root, _dir, files in os.walk(os.path.join(main_root, stdlib_dir)):
flag = False
root_elts = root.split(os.sep)
for exclude in exclude_dirs:
if exclude in root_elts:
flag = True
continue
if flag:
continue # skip these modules
if '__pycache__' in root:
continue
nb += 1
for _file in files:
ext = os.path.splitext(_file)[1]
if ext not in ('.js', '.py'):
continue
if re.match(r'^module\d+\..*$', _file):
continue
nb += 1
file_name = os.path.join(root, _file)
with open(file_name, encoding='utf-8') as f:
data = f.read()
if ext == '.py':
data = python_minifier.minify(data, preserve_lines=True)
vfs_path = os.path.join(root, _file).replace(main_root, '')
vfs_path = vfs_path.replace("\\", "/")
if vfs_path.startswith('/libs/crypto_js/rollups/'):
if _file not in ('md5.js', 'sha1.js', 'sha3.js',
'sha224.js', 'sha384.js', 'sha512.js'):
continue
mod_name = vfs_path[len(stdlib_dir) + 2:].replace('/', '.')
mod_name, ext = os.path.splitext(mod_name)
is_package = mod_name.endswith('__init__')
if is_package:
mod_name = mod_name[:-9]
VFS[mod_name] = [ext, data, 1]
else:
VFS[mod_name] = [ext, data]
print("adding {}".format(mod_name))
print('{} files, {} errors'.format(nb, nb_err))
with open(filename, "w") as out:
out.write('__BRYTHON__.use_VFS = true;\n')
out.write('__BRYTHON__.VFS={}\n\n'.format(json.dumps(VFS)))
if __name__ == '__main__':
main_root = os.path.join(os.path.dirname(os.getcwd()), 'www', 'src')
process(os.path.join(main_root, "py_VFS.js"))
| bsd-3-clause | 192,025,640,154,851,200 | 33.652778 | 78 | 0.467735 | false |