"""
Base classes for writing management commands (named commands which can
be executed through ``tipi.py``).
"""
import os
import sys
from ConfigParser import ConfigParser
from optparse import make_option, OptionParser
from virtualenv import resolve_interpreter
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``tipi.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbose', action='store', dest='verbose', default='1',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
make_option('-p', '--python',
help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
'interpreter to create the new environment. The default is the interpreter that '
'virtualenv was installed with (%s)' % sys.executable),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
#TODO syntax coloring support
#def __init__(self):
# #self.style = color_style()
# try:
# home = os.getenv('USERPROFILE') or os.getenv('HOME')
# config = ConfigParser(open(os.path.join(home, '.tipirc')))
# except IOError:
# pass
# except:
# pass
#
# self._interpreter = resolve_interpreter('python')
#
#@property
#def python_interpreter(self):
# return self._interpreter
def get_version(self):
"""
        Return the version of tipi, which should be correct for all
        built-in commands. User-supplied commands should override this
        method.
"""
#TODO placeholder
return (0, 1, 0,)
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=str(self.get_version()),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested, then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command. If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
        try:
            output = self.handle(*args, **options)
            if output:
                print output
except CommandError, e:
#sys.stderr.write(self.style.ERROR(str('Error: %s\n' % e)))
sys.stderr.write(str('Error: %s\n' % e))
sys.exit(1)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
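# Illustrative example (not part of the original module): a minimal,
# hypothetical subclass showing the ``handle()`` pattern described in the
# ``BaseCommand`` docstring, together with the call ``tipi.py`` is expected
# to make. The name ``GreetCommand`` is invented for this sketch.
class GreetCommand(BaseCommand):
    help = 'Print a greeting for each name given on the command line.'
    args = '<name name ...>'
    def handle(self, *args, **options):
        if not args:
            raise CommandError('Enter at least one name.')
        return '\n'.join('Hello, %s!' % name for name in args)
# Typical invocation from ``tipi.py``:
#   GreetCommand().run_from_argv(['tipi.py', 'greet', 'Ada', 'Grace'])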
#class AppCommand(BaseCommand):
# """
# A management command which takes one or more installed application
# names as arguments, and does something with each of them.
#
# Rather than implementing ``handle()``, subclasses must implement
# ``handle_app()``, which will be called once for each application.
#
# """
# args = '<appname appname ...>'
#
# def handle(self, *app_labels, **options):
# from django.db import models
# if not app_labels:
# raise CommandError('Enter at least one appname.')
# try:
# app_list = [models.get_app(app_label) for app_label in app_labels]
# except (ImproperlyConfigured, ImportError), e:
# raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
# output = []
# for app in app_list:
# app_output = self.handle_app(app, **options)
# if app_output:
# output.append(app_output)
# return '\n'.join(output)
#
# def handle_app(self, app, **options):
# """
# Perform the command's actions for ``app``, which will be the
# Python module corresponding to an application name given on
# the command line.
#
# """
# raise NotImplementedError()
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
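# Illustrative example (not part of the original module): a hypothetical
# ``LabelCommand`` subclass showing the ``handle_label()`` hook; the name
# ``EchoCommand`` is invented for this sketch.
class EchoCommand(LabelCommand):
    help = 'Echo each label given on the command line.'
    label = 'word'
    def handle_label(self, label, **options):
        return 'Echo: %s' % label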
#class NoArgsCommand(BaseCommand):
# """
# A command which takes no arguments on the command line.
#
# Rather than implementing ``handle()``, subclasses must implement
# ``handle_noargs()``; ``handle()`` itself is overridden to ensure
# no arguments are passed to the command.
#
# Attempting to pass arguments will raise ``CommandError``.
#
# """
# args = ''
#
# def handle(self, *args, **options):
# if args:
# raise CommandError("Command doesn't accept any arguments")
# return self.handle_noargs(**options)
#
# def handle_noargs(self, **options):
# """
# Perform this command's actions.
#
# """
# raise NotImplementedError()
#def copy_helper(style, app_or_project, name, directory, other_name=''):
# """
# Copies either a Django application layout template or a Django project
# layout template into the specified directory.
#
# """
# # style -- A color style object (see django.core.management.color).
# # app_or_project -- The string 'app' or 'project'.
# # name -- The name of the application or project.
# # directory -- The directory to which the layout template should be copied.
# # other_name -- When copying an application layout, this should be the name
# # of the project.
# import re
# import shutil
# other = {'project': 'app', 'app': 'project'}[app_or_project]
# if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# # Provide a smart error message, depending on the error.
# if not re.search(r'^[_a-zA-Z]', name):
# message = 'make sure the name begins with a letter or underscore'
# else:
# message = 'use only numbers, letters and underscores'
# raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
# top_dir = os.path.join(directory, name)
# try:
# os.mkdir(top_dir)
# except OSError, e:
# raise CommandError(e)
#
# # Determine where the app or project templates are. Use
# # django.__path__[0] because we don't know into which directory
# # django has been installed.
# template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
#
# for d, subdirs, files in os.walk(template_dir):
# relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
# if relative_dir:
# os.mkdir(os.path.join(top_dir, relative_dir))
# for i, subdir in enumerate(subdirs):
# if subdir.startswith('.'):
# del subdirs[i]
# for f in files:
# if not f.endswith('.py'):
# # Ignore .pyc, .pyo, .py.class etc, as they cause various
# # breakages.
# continue
# path_old = os.path.join(d, f)
# path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
# fp_old = open(path_old, 'r')
# fp_new = open(path_new, 'w')
# fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
# fp_old.close()
# fp_new.close()
# try:
# shutil.copymode(path_old, path_new)
# _make_writeable(path_new)
# except OSError:
# sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
#
#def _make_writeable(filename):
# """
# Make sure that the file is writeable. Useful if our source is
# read-only.
#
# """
# import stat
# if sys.platform.startswith('java'):
# # On Jython there is no os.access()
# return
# if not os.access(filename, os.W_OK):
# st = os.stat(filename)
# new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
# os.chmod(filename, new_permissions)
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import division
from __future__ import absolute_import
import logging
import time
import six
import six.moves.urllib.parse # pylint: disable=import-error
from six.moves import input # pylint: disable=redefined-builtin
from telemetry.core import exceptions
from telemetry.internal.actions import page_action
from telemetry.internal.actions.drag import DragAction
from telemetry.internal.actions.javascript_click import ClickElementAction
from telemetry.internal.actions.key_event import KeyPressAction
from telemetry.internal.actions.load_media import LoadMediaAction
from telemetry.internal.actions.mouse_click import MouseClickAction
from telemetry.internal.actions.navigate import NavigateAction
from telemetry.internal.actions.page_action import GESTURE_SOURCE_DEFAULT
from telemetry.internal.actions.page_action import SUPPORTED_GESTURE_SOURCES
from telemetry.internal.actions.pinch import PinchAction
from telemetry.internal.actions.play import PlayAction
from telemetry.internal.actions.repaint_continuously import (
RepaintContinuouslyAction)
from telemetry.internal.actions.repeatable_scroll import RepeatableScrollAction
from telemetry.internal.actions.scroll import ScrollAction
from telemetry.internal.actions.scroll_bounce import ScrollBounceAction
from telemetry.internal.actions.scroll_to_element import ScrollToElementAction
from telemetry.internal.actions.seek import SeekAction
from telemetry.internal.actions.swipe import SwipeAction
from telemetry.internal.actions.tap import TapAction
from telemetry.internal.actions.wait import WaitForElementAction
from telemetry.web_perf import timeline_interaction_record
from py_trace_event import trace_event
import py_utils
# Time to wait in seconds before requesting a memory dump in deterministic
# mode, thus allowing metric values to stabilize a bit.
_MEMORY_DUMP_WAIT_TIME = 3
# Time to wait in seconds after forcing garbage collection to allow its
# effects to propagate. Experimentally determined on an Android One device
# that Java Heap garbage collection can take ~5 seconds to complete.
_GARBAGE_COLLECTION_PROPAGATION_TIME = 6
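# The metaclass below wraps ActionRunner's methods so that they are recorded
# as trace events via py_trace_event; the six-based indirection keeps this
# working under both Python 2 and Python 3.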
if six.PY2:
ActionRunnerBase = object
else:
ActionRunnerBase = six.with_metaclass(trace_event.TracedMetaClass, object)
class ActionRunner(ActionRunnerBase):
if six.PY2:
__metaclass__ = trace_event.TracedMetaClass
def __init__(self, tab, skip_waits=False):
self._tab = tab
self._skip_waits = skip_waits
@property
def tab(self):
"""Returns the tab on which actions are performed."""
return self._tab
def _RunAction(self, action):
logging.info("START Page Action: %s", action)
action.WillRunAction(self._tab)
action.RunAction(self._tab)
logging.info("DONE Page Action: %s", action.__class__.__name__)
def CreateInteraction(self, label, repeatable=False):
""" Create an action.Interaction object that issues interaction record.
An interaction record is a labeled time period containing
interaction that developers care about. Each set of metrics
specified in flags will be calculated for this time period.
To mark the start of interaction record, call Begin() method on the returned
object. To mark the finish of interaction record, call End() method on
it. Or better yet, use the with statement to create an
interaction record that covers the actions in the with block.
e.g:
with action_runner.CreateInteraction('Animation-1'):
action_runner.TapElement(...)
action_runner.WaitForJavaScriptCondition(...)
Args:
label: A label for this particular interaction. This can be any
user-defined string, but must not contain '/'.
repeatable: Whether other interactions may use the same logical name
as this interaction. All interactions with the same logical name must
have the same flags.
Returns:
An instance of action_runner.Interaction
"""
flags = []
if repeatable:
flags.append(timeline_interaction_record.REPEATABLE)
return Interaction(self, label, flags)
def CreateGestureInteraction(self, label, repeatable=False):
""" Create an action.Interaction object that issues gesture-based
interaction record.
This is similar to normal interaction record, but it will
auto-narrow the interaction time period to only include the
synthetic gesture event output by Chrome. This is typically use to
reduce noise in gesture-based analysis (e.g., analysis for a
swipe/scroll).
The interaction record label will be prepended with 'Gesture_'.
e.g:
with action_runner.CreateGestureInteraction('Scroll-1'):
action_runner.ScrollPage()
Args:
label: A label for this particular interaction. This can be any
user-defined string, but must not contain '/'.
repeatable: Whether other interactions may use the same logical name
as this interaction. All interactions with the same logical name must
have the same flags.
Returns:
An instance of action_runner.Interaction
"""
return self.CreateInteraction('Gesture_' + label, repeatable)
def WaitForNetworkQuiescence(self, timeout_in_seconds=10):
""" Wait for network quiesence on the page.
Args:
      timeout_in_seconds: maximum amount of time (seconds) to wait for network
        quiescence before raising an exception.
Raises:
py_utils.TimeoutException when the timeout is reached but the page's
network is not quiet.
"""
py_utils.WaitFor(self.tab.HasReachedQuiescence, timeout_in_seconds)
def MeasureMemory(self, deterministic_mode=False):
"""Add a memory measurement to the trace being recorded.
Behaves as a no-op if tracing is not enabled.
Args:
deterministic_mode: A boolean indicating whether to attempt or not to
control the environment (force GCs, clear caches) before making the
measurement in an attempt to obtain more deterministic results.
Returns:
GUID of the generated dump if one was triggered, None otherwise.
"""
if not self.tab.browser.platform.tracing_controller.is_tracing_running:
logging.warning('Tracing is off. No memory dumps are being recorded.')
return None
if deterministic_mode:
self.Wait(_MEMORY_DUMP_WAIT_TIME)
self.ForceGarbageCollection()
dump_id = self.tab.browser.DumpMemory()
if not dump_id:
raise exceptions.StoryActionError('Unable to obtain memory dump')
return dump_id
def PrepareForLeakDetection(self):
"""Prepares for Leak Detection.
    Terminates workers, stops spellcheckers, runs GC, etc.
"""
self._tab.PrepareForLeakDetection()
def Navigate(self, url, script_to_evaluate_on_commit=None,
timeout_in_seconds=page_action.DEFAULT_TIMEOUT):
"""Navigates to |url|.
If |script_to_evaluate_on_commit| is given, the script source string will be
evaluated when the navigation is committed. This is after the context of
the page exists, but before any script on the page itself has executed.
"""
if six.moves.urllib.parse.urlparse(url).scheme == 'file':
url = self._tab.browser.platform.http_server.UrlOf(url[7:])
self._RunAction(NavigateAction(
url=url,
script_to_evaluate_on_commit=script_to_evaluate_on_commit,
timeout_in_seconds=timeout_in_seconds))
def NavigateBack(self):
""" Navigate back to the previous page."""
self.ExecuteJavaScript('window.history.back()')
  def WaitForNavigate(
      self, timeout_in_seconds=page_action.DEFAULT_TIMEOUT):
    start_time = time.time()
    self._tab.WaitForNavigate(timeout_in_seconds)
    time_left_in_seconds = (start_time + timeout_in_seconds
                            - time.time())
    time_left_in_seconds = max(0, time_left_in_seconds)
    self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter(
        time_left_in_seconds)
def ReloadPage(self):
"""Reloads the page."""
self._tab.ExecuteJavaScript('window.location.reload()')
self._tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
def ExecuteJavaScript(self, *args, **kwargs):
"""Executes a given JavaScript statement. Does not return the result.
Example: runner.ExecuteJavaScript('var foo = {{ value }};', value='hi');
Args:
statement: The statement to execute (provided as a string).
Optional keyword args:
timeout: The number of seconds to wait for the statement to execute.
Additional keyword arguments provide values to be interpolated within
the statement. See telemetry.util.js_template for details.
Raises:
EvaluationException: The statement failed to execute.
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = page_action.DEFAULT_TIMEOUT
return self._tab.ExecuteJavaScript(*args, **kwargs)
def EvaluateJavaScript(self, *args, **kwargs):
"""Returns the result of evaluating a given JavaScript expression.
The evaluation results must be convertible to JSON. If the result
is not needed, use ExecuteJavaScript instead.
    Example: runner.EvaluateJavaScript('document.location.href');
Args:
expression: The expression to execute (provided as a string).
Optional keyword args:
timeout: The number of seconds to wait for the expression to evaluate.
Additional keyword arguments provide values to be interpolated within
the expression. See telemetry.util.js_template for details.
Raises:
      EvaluationException: The expression failed to evaluate, or the
        evaluation result can not be JSON-ized.
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = page_action.DEFAULT_TIMEOUT
return self._tab.EvaluateJavaScript(*args, **kwargs)
def WaitForJavaScriptCondition(self, *args, **kwargs):
"""Wait for a JavaScript condition to become true.
Example: runner.WaitForJavaScriptCondition('window.foo == 10');
Args:
condition: The JavaScript condition (provided as string).
Optional keyword args:
      timeout: The number of seconds to wait for the condition to become
        True (defaults to 60).
Additional keyword arguments provide values to be interpolated within
the expression. See telemetry.util.js_template for details.
"""
if 'timeout' not in kwargs:
kwargs['timeout'] = page_action.DEFAULT_TIMEOUT
return self._tab.WaitForJavaScriptCondition(*args, **kwargs)
def Wait(self, seconds):
"""Wait for the number of seconds specified.
Args:
seconds: The number of seconds to wait.
"""
if not self._skip_waits:
time.sleep(seconds)
def WaitForElement(self, selector=None, text=None, element_function=None,
timeout_in_seconds=page_action.DEFAULT_TIMEOUT):
"""Wait for an element to appear in the document.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
timeout_in_seconds: The timeout in seconds.
"""
self._RunAction(WaitForElementAction(
selector=selector, text=text, element_function=element_function,
timeout=timeout_in_seconds))
def TapElement(self, selector=None, text=None, element_function=None,
timeout=page_action.DEFAULT_TIMEOUT):
"""Tap an element.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
"""
self._RunAction(TapAction(
selector=selector, text=text, element_function=element_function,
timeout=timeout))
def ClickElement(self, selector=None, text=None, element_function=None):
"""Click an element.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'(function() { return foo.element; })()'.
"""
self._RunAction(ClickElementAction(
selector=selector, text=text, element_function=element_function))
def DragPage(self, left_start_ratio, top_start_ratio, left_end_ratio,
top_end_ratio, speed_in_pixels_per_second=800, use_touch=False,
selector=None, text=None, element_function=None):
"""Perform a drag gesture on the page.
You should specify a start and an end point in ratios of page width and
height (see drag.js for full implementation).
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
left_end_ratio: The horizontal ending coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_end_ratio: The vertical ending coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
use_touch: Whether dragging should be done with touch input.
"""
self._RunAction(DragAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
left_end_ratio=left_end_ratio, top_end_ratio=top_end_ratio,
speed_in_pixels_per_second=speed_in_pixels_per_second,
use_touch=use_touch, selector=selector, text=text,
element_function=element_function))
def PinchPage(self, left_anchor_ratio=0.5, top_anchor_ratio=0.5,
scale_factor=None, speed_in_pixels_per_second=800):
"""Perform the pinch gesture on the page.
It computes the pinch gesture automatically based on the anchor
coordinate and the scale factor. The scale factor is the ratio of
    the final span to the initial span of the gesture.
Args:
left_anchor_ratio: The horizontal pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_anchor_ratio: The vertical pinch anchor coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
scale_factor: The ratio of the final span to the initial span.
The default scale factor is 3.0 / (current scale factor).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(PinchAction(
left_anchor_ratio=left_anchor_ratio, top_anchor_ratio=top_anchor_ratio,
scale_factor=scale_factor,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def ScrollPage(self, left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=None, distance_expr=None,
speed_in_pixels_per_second=800, use_touch=False,
synthetic_gesture_source=GESTURE_SOURCE_DEFAULT):
"""Perform scroll gesture on the page.
You may specify distance or distance_expr, but not both. If
neither is specified, the default scroll distance is variable
depending on direction (see scroll.js for full implementation).
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
distance: The distance to scroll (in pixel).
distance_expr: A JavaScript expression (as string) that can be
evaluated to compute scroll distance. Example:
'window.scrollTop' or '(function() { return crazyMath(); })()'.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
use_touch: Whether scrolling should be done with touch input.
synthetic_gesture_source: the source input device type for the
synthetic gesture: 'DEFAULT', 'TOUCH' or 'MOUSE'.
"""
assert synthetic_gesture_source in SUPPORTED_GESTURE_SOURCES
self._RunAction(ScrollAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance, distance_expr=distance_expr,
speed_in_pixels_per_second=speed_in_pixels_per_second,
use_touch=use_touch, synthetic_gesture_source=synthetic_gesture_source))
def ScrollPageToElement(self, selector=None, element_function=None,
container_selector=None,
container_element_function=None,
speed_in_pixels_per_second=800):
"""Perform scroll gesture on container until an element is in view.
Both the element and the container can be specified by a CSS selector
xor a JavaScript function, provided as a string, which returns an element.
The element is required so exactly one of selector and element_function
must be provided. The container is optional so at most one of
container_selector and container_element_function can be provided.
The container defaults to document.scrollingElement or document.body if
scrollingElement is not set.
Args:
selector: A CSS selector describing the element.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
container_selector: A CSS selector describing the container element.
container_element_function: A JavaScript function (as a string) that is
used to retrieve the container element.
speed_in_pixels_per_second: Speed to scroll.
"""
self._RunAction(ScrollToElementAction(
selector=selector, element_function=element_function,
container_selector=container_selector,
container_element_function=container_element_function,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def RepeatableBrowserDrivenScroll(self, x_scroll_distance_ratio=0.0,
y_scroll_distance_ratio=0.5,
repeat_count=0,
repeat_delay_ms=250,
timeout=page_action.DEFAULT_TIMEOUT,
prevent_fling=None,
speed=None):
"""Perform a browser driven repeatable scroll gesture.
    The scroll gesture is driven from the browser; this is useful because the
    main thread often isn't responsive but the browser process usually is, so
    the delay between the scroll gestures should be consistent.
Args:
x_scroll_distance_ratio: The horizontal length of the scroll as a fraction
of the screen width.
y_scroll_distance_ratio: The vertical length of the scroll as a fraction
of the screen height.
repeat_count: The number of additional times to repeat the gesture.
repeat_delay_ms: The delay in milliseconds between each scroll gesture.
prevent_fling: Prevents a fling gesture.
speed: Swipe speed in pixels per second.
"""
self._RunAction(RepeatableScrollAction(
x_scroll_distance_ratio=x_scroll_distance_ratio,
y_scroll_distance_ratio=y_scroll_distance_ratio,
repeat_count=repeat_count,
repeat_delay_ms=repeat_delay_ms, timeout=timeout,
prevent_fling=prevent_fling, speed=speed))
def ScrollElement(self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=None, distance_expr=None,
speed_in_pixels_per_second=800, use_touch=False,
synthetic_gesture_source=GESTURE_SOURCE_DEFAULT):
"""Perform scroll gesture on the element.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
You may specify distance or distance_expr, but not both. If
neither is specified, the default scroll distance is variable
depending on direction (see scroll.js for full implementation).
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
distance: The distance to scroll (in pixel).
distance_expr: A JavaScript expression (as string) that can be
evaluated to compute scroll distance. Example:
'window.scrollTop' or '(function() { return crazyMath(); })()'.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
use_touch: Whether scrolling should be done with touch input.
synthetic_gesture_source: the source input device type for the
synthetic gesture: 'DEFAULT', 'TOUCH' or 'MOUSE'.
"""
assert synthetic_gesture_source in SUPPORTED_GESTURE_SOURCES
self._RunAction(ScrollAction(
selector=selector, text=text, element_function=element_function,
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance, distance_expr=distance_expr,
speed_in_pixels_per_second=speed_in_pixels_per_second,
use_touch=use_touch, synthetic_gesture_source=synthetic_gesture_source))
def ScrollBouncePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=100,
overscroll=10, repeat_count=10,
speed_in_pixels_per_second=400):
"""Perform scroll bounce gesture on the page.
This gesture scrolls the page by the number of pixels specified in
distance, in the given direction, followed by a scroll by
(distance + overscroll) pixels in the opposite direction.
The above gesture is repeated repeat_count times.
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
distance: The distance to scroll (in pixel).
overscroll: The number of additional pixels to scroll back, in
        addition to the given distance.
repeat_count: How often we want to repeat the full gesture.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(ScrollBounceAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
overscroll=overscroll, repeat_count=repeat_count,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def ScrollBounceElement(
self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='down', distance=100,
overscroll=10, repeat_count=10,
speed_in_pixels_per_second=400):
"""Perform scroll bounce gesture on the element.
This gesture scrolls on the element by the number of pixels specified in
distance, in the given direction, followed by a scroll by
(distance + overscroll) pixels in the opposite direction.
The above gesture is repeated repeat_count times.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of scroll, either 'left', 'right',
'up', 'down', 'upleft', 'upright', 'downleft', or 'downright'
distance: The distance to scroll (in pixel).
overscroll: The number of additional pixels to scroll back, in
addition to the given distance.
repeat_count: How often we want to repeat the full gesture.
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(ScrollBounceAction(
selector=selector, text=text, element_function=element_function,
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
overscroll=overscroll, repeat_count=repeat_count,
speed_in_pixels_per_second=speed_in_pixels_per_second))
def MouseClick(self, selector=None, timeout=page_action.DEFAULT_TIMEOUT):
"""Mouse click the given element.
Args:
selector: A CSS selector describing the element.
"""
self._RunAction(MouseClickAction(selector=selector, timeout=timeout))
def SwipePage(self, left_start_ratio=0.5, top_start_ratio=0.5,
direction='left', distance=100, speed_in_pixels_per_second=800,
timeout=page_action.DEFAULT_TIMEOUT):
"""Perform swipe gesture on the page.
Args:
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
document.body.
direction: The direction of swipe, either 'left', 'right',
'up', or 'down'
distance: The distance to swipe (in pixel).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(SwipeAction(
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
speed_in_pixels_per_second=speed_in_pixels_per_second,
timeout=timeout))
def SwipeElement(self, selector=None, text=None, element_function=None,
left_start_ratio=0.5, top_start_ratio=0.5,
direction='left', distance=100,
speed_in_pixels_per_second=800,
timeout=page_action.DEFAULT_TIMEOUT):
"""Perform swipe gesture on the element.
The element may be selected via selector, text, or element_function.
Only one of these arguments must be specified.
Args:
selector: A CSS selector describing the element.
      text: The element must contain this exact text.
element_function: A JavaScript function (as string) that is used
to retrieve the element. For example:
'function() { return foo.element; }'.
left_start_ratio: The horizontal starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
top_start_ratio: The vertical starting coordinate of the
gesture, as a ratio of the visible bounding rectangle for
the element.
direction: The direction of swipe, either 'left', 'right',
'up', or 'down'
distance: The distance to swipe (in pixel).
speed_in_pixels_per_second: The speed of the gesture (in pixels/s).
"""
self._RunAction(SwipeAction(
selector=selector, text=text, element_function=element_function,
left_start_ratio=left_start_ratio, top_start_ratio=top_start_ratio,
direction=direction, distance=distance,
speed_in_pixels_per_second=speed_in_pixels_per_second,
timeout=timeout))
def PressKey(self, key, repeat_count=1, repeat_delay_ms=100,
timeout=page_action.DEFAULT_TIMEOUT):
"""Perform a key press.
Args:
key: DOM value of the pressed key (e.g. 'PageDown', see
https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key).
repeat_count: How many times the key should be pressed.
repeat_delay_ms: Delay after each keypress (including the last one) in
milliseconds.
"""
for _ in range(repeat_count):
self._RunAction(KeyPressAction(key, timeout=timeout))
      # 2To3-division: this line is unchanged as the result is expected to be
      # a float.
self.Wait(repeat_delay_ms / 1000.0)
def EnterText(self, text, character_delay_ms=100,
timeout=page_action.DEFAULT_TIMEOUT):
"""Enter text by performing key presses.
Args:
text: The text to enter.
character_delay_ms: Delay after each keypress (including the last one) in
milliseconds.
"""
for c in text:
self.PressKey(c, repeat_delay_ms=character_delay_ms, timeout=timeout)
def LoadMedia(self, selector=None, event_timeout_in_seconds=0,
event_to_await='canplaythrough'):
"""Invokes load() on media elements and awaits an event.
Args:
      selector: A CSS selector describing the element. If none is
        specified, load the first media element on the page. If the
        selector matches more than 1 media element, all of them will
        be loaded.
event_timeout_in_seconds: Maximum waiting time for the event to be fired.
0 means do not wait.
event_to_await: Which event to await. For example: 'canplaythrough' or
'loadedmetadata'.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(LoadMediaAction(
selector=selector, timeout_in_seconds=event_timeout_in_seconds,
event_to_await=event_to_await))
def PlayMedia(self, selector=None,
playing_event_timeout_in_seconds=0,
ended_event_timeout_in_seconds=0):
"""Invokes the "play" action on media elements (such as video).
Args:
selector: A CSS selector describing the element. If none is
specified, play the first media element on the page. If the
selector matches more than 1 media element, all of them will
be played.
playing_event_timeout_in_seconds: Maximum waiting time for the "playing"
event (dispatched when the media begins to play) to be fired.
0 means do not wait.
ended_event_timeout_in_seconds: Maximum waiting time for the "ended"
event (dispatched when playback completes) to be fired.
0 means do not wait.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(PlayAction(
selector=selector,
playing_event_timeout_in_seconds=playing_event_timeout_in_seconds,
ended_event_timeout_in_seconds=ended_event_timeout_in_seconds))
def SeekMedia(self, seconds, selector=None, timeout_in_seconds=0,
log_time=True, label=''):
"""Performs a seek action on media elements (such as video).
Args:
seconds: The media time to seek to.
selector: A CSS selector describing the element. If none is
specified, seek the first media element on the page. If the
selector matches more than 1 media element, all of them will
be seeked.
timeout_in_seconds: Maximum waiting time for the "seeked" event
(dispatched when the seeked operation completes) to be
fired. 0 means do not wait.
log_time: Whether to log the seek time for the perf
        measurement. Useful when performing multiple seeks.
label: A suffix string to name the seek perf measurement.
Raises:
TimeoutException: If the maximum waiting time is exceeded.
"""
self._RunAction(SeekAction(
seconds=seconds, selector=selector,
timeout_in_seconds=timeout_in_seconds,
log_time=log_time, label=label))
def ForceGarbageCollection(self):
"""Forces garbage collection on all relevant systems.
This includes:
- Java heap for browser and child subprocesses (on Android).
- JavaScript on the current renderer.
- System caches (on supported platforms).
"""
# 1) Perform V8 and Blink garbage collection. This may free java wrappers.
self._tab.CollectGarbage()
# 2) Perform Java garbage collection
if self._tab.browser.supports_java_heap_garbage_collection:
self._tab.browser.ForceJavaHeapGarbageCollection()
# 3) Flush system caches. This affects GPU memory.
if self._tab.browser.platform.SupportFlushEntireSystemCache():
self._tab.browser.platform.FlushEntireSystemCache()
# 4) Wait until the effect of Java GC and system cache flushing propagates.
self.Wait(_GARBAGE_COLLECTION_PROPAGATION_TIME)
# 5) Re-do V8 and Blink garbage collection to free garbage allocated
# while waiting.
self._tab.CollectGarbage()
# 6) Finally, finish with V8 and Blink garbage collection because some
# objects require V8 GC => Blink GC => V8 GC roundtrip.
self._tab.CollectGarbage()
def SimulateMemoryPressureNotification(self, pressure_level):
"""Simulate memory pressure notification.
Args:
pressure_level: 'moderate' or 'critical'.
"""
self._tab.browser.SimulateMemoryPressureNotification(pressure_level)
def EnterOverviewMode(self):
if not self._tab.browser.supports_overview_mode:
raise exceptions.StoryActionError('Overview mode is not supported')
self._tab.browser.EnterOverviewMode()
def ExitOverviewMode(self):
if not self._tab.browser.supports_overview_mode:
raise exceptions.StoryActionError('Overview mode is not supported')
self._tab.browser.ExitOverviewMode()
def PauseInteractive(self):
"""Pause the page execution and wait for terminal interaction.
This is typically used for debugging. You can use this to pause
the page execution and inspect the browser state before
continuing.
"""
input("Interacting... Press Enter to continue.")
def RepaintContinuously(self, seconds):
"""Continuously repaints the visible content.
It does this by requesting animation frames until the given number
of seconds have elapsed AND at least three RAFs have been
    fired. Times out after max(60, seconds) if fewer than three RAFs
    were fired."""
self._RunAction(RepaintContinuouslyAction(
seconds=0 if self._skip_waits else seconds))
def StartMobileDeviceEmulation(
self, width=360, height=640, dsr=2, timeout=60):
"""Emulates a mobile device.
This method is intended for benchmarks used to gather non-performance
metrics only. Mobile emulation is not guaranteed to have the same
performance characteristics as real devices.
Example device parameters:
https://gist.github.com/devinmancuso/0c94410cb14c83ddad6f
Args:
width: Screen width.
height: Screen height.
dsr: Screen device scale factor.
"""
self._tab.StartMobileDeviceEmulation(width, height, dsr, timeout)
def StopMobileDeviceEmulation(self, timeout=60):
"""Stops emulation of a mobile device."""
self._tab.StopMobileDeviceEmulation(timeout)
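# Illustrative usage sketch (not part of the original module): how a Telemetry
# story would typically drive an ActionRunner. The '#content' selector is
# invented for this example.
def _ExampleStoryInteractions(action_runner):
  action_runner.WaitForElement(selector='#content')
  with action_runner.CreateGestureInteraction('Scroll-1'):
    action_runner.ScrollPage(direction='down', distance=1000)
  action_runner.MeasureMemory(deterministic_mode=True)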
class Interaction(object):
def __init__(self, action_runner, label, flags):
assert action_runner
assert label
assert isinstance(flags, list)
self._action_runner = action_runner
self._label = label
self._flags = flags
self._started = False
def __enter__(self):
self.Begin()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_value is None:
self.End()
else:
logging.warning(
'Exception was raised in the with statement block, the end of '
'interaction record is not marked.')
def Begin(self):
assert not self._started
self._started = True
self._action_runner.ExecuteJavaScript(
'console.time({{ marker }});',
marker=timeline_interaction_record.GetJavaScriptMarker(
self._label, self._flags))
def End(self):
assert self._started
self._started = False
self._action_runner.ExecuteJavaScript(
'console.timeEnd({{ marker }});',
marker=timeline_interaction_record.GetJavaScriptMarker(
self._label, self._flags))
import OpenGL
OpenGL.USE_ACCELERATE = True
OpenGL.ERROR_ON_COPY = False
OpenGL.ERROR_CHECKING = False
OpenGL.FULL_LOGGING = False
OpenGL.ARRAY_SIZE_CHECKING = True
import numpy as np
np.set_printoptions(precision=2, suppress=True)
from OpenGL.GL import *
from OpenGL.GL.shaders import compileProgram, compileShader
from OpenGL.arrays import vbo
from math import acos, tan, sqrt, radians
from random import random
from core.transform import *
from core.shaders import fragment_shaded, vertex_shaded
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QOpenGLWidget
IDENTITY = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], dtype=np.float32, order='F').reshape(4, 4)
class GLWidget(QOpenGLWidget):
def __init__(self, parent):
super().__init__(parent)
self.parent = parent
self.orthographic = True
self.view = IDENTITY.copy()
self.projection = IDENTITY.copy()
self.cam_rotate = IDENTITY.copy()
self.r = 10
self.fov = 80
self.light_pos = (25, 25, 25)
self.light_intensity = 20.0
self.to_render = []
self.to_update = []
def set_perspective_fov(self, fovy, aspect, z_near, z_far):
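        # Standard perspective projection (same form as gluPerspective):
        # fovy is the vertical field of view in degrees, aspect the
        # width/height ratio, z_near/z_far the clip plane distances.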
tan_half_fovy = tan(radians(fovy) / 2)
self.projection = np.array([[1 / (aspect * tan_half_fovy), 0, 0, 0],
[0, 1 / tan_half_fovy, 0, 0],
[0, 0, -(z_far + z_near) / (z_far - z_near), -2 * z_far * z_near / (z_far - z_near)],
[0, 0, -1, 0]], dtype=np.float32).reshape(4, 4)
def set_ortho(self, width, height, z_near, z_far):
r = width / 2
t = height / 2
self.projection = np.array([[1 / r, 0, 0, 0],
[0, 1 / t, 0, 0],
[0, 0, -2 / (z_far - z_near), -(z_far + z_near) / (z_far - z_near)],
[0, 0, 0, 1]], dtype=np.float32).reshape(4, 4)
def initializeGL(self):
pass
def schedule_buffer_update(self, meshes):
self.to_update = meshes
def update_buffers(self):
for data, indices, *stuff in self.to_render:
try:
data.unbind()
indices.unbind()
            except Exception:
                # The buffer may already be unbound or deleted; ignore.
                pass
self.to_render.clear()
for mesh in self.to_update:
data = vbo.VBO(mesh.coords, usage=GL_STATIC_DRAW, target=GL_ARRAY_BUFFER)
indices = vbo.VBO(mesh.indices, usage=GL_STATIC_DRAW, target=GL_ELEMENT_ARRAY_BUFFER)
color = np.array([random(),
random(),
random()], 'f') * 2
self.to_render.append((data, indices, mesh, color))
self.to_update = []
self.compute_view()
self.update()
def paintGL(self):
program = compileProgram(compileShader(vertex_shaded, GL_VERTEX_SHADER),
compileShader(fragment_shaded, GL_FRAGMENT_SHADER))
glLinkProgram(program)
vertex_pos_model_space_ID = glGetAttribLocation(program, 'vertex_pos_model_space')
vertex_normal_model_space_ID = glGetAttribLocation(program, 'vertex_normal_model_space')
MVP_UID = glGetUniformLocation(program, 'MVP')
M_UID = glGetUniformLocation(program, 'M')
V_UID = glGetUniformLocation(program, 'V')
color_UID = glGetUniformLocation(program, 'color')
light_intensity_UID = glGetUniformLocation(program, 'light_intensity')
light_pos_world_space_UID = glGetUniformLocation(program, 'light_pos_world_space')
glEnable(GL_CULL_FACE)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LESS)
glClearColor(0.2, 0.3, 0.35, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
for data, indices, mesh, color in self.to_render:
try:
glUseProgram(program)
MVP = mmul(self.projection, self.view, mesh.transform.get_matrix())
glUniformMatrix4fv(MVP_UID, 1, GL_FALSE, MVP)
glUniformMatrix4fv(M_UID, 1, GL_FALSE, mesh.transform.get_matrix())
glUniformMatrix4fv(V_UID, 1, GL_FALSE, self.view)
glUniform1f(light_intensity_UID, np.single(30 * self.light_intensity))
glUniform3f(color_UID, *color)
glUniform3f(light_pos_world_space_UID, *np.array(self.light_pos, 'f'))
data.bind()
indices.bind()
try:
glEnableVertexAttribArray(vertex_pos_model_space_ID)
glVertexAttribPointer(vertex_pos_model_space_ID, 3, GL_FLOAT, GL_FALSE, 12, data)
glEnableVertexAttribArray(vertex_normal_model_space_ID)
glVertexAttribPointer(vertex_normal_model_space_ID, 3, GL_FLOAT, GL_FALSE, 12, data)
glDrawElements(GL_TRIANGLES, len(indices),
GL_UNSIGNED_INT, indices)
finally:
indices.unbind()
data.unbind()
glDisableVertexAttribArray(vertex_pos_model_space_ID)
glDisableVertexAttribArray(vertex_normal_model_space_ID)
finally:
glUseProgram(0)
def resizeGL(self, width, height):
self.width, self.height = width, height
self.compute_view()
def mousePressEvent(self, event):
self.last_pos = event.x(), event.y()
def mouseMoveEvent(self, event):
new_pos = event.x(), event.y()
if event.buttons() & Qt.LeftButton:
self.arcball_rotate(new_pos, self.last_pos)
self.compute_view()
if event.buttons() & Qt.RightButton:
self.zoom_camera(new_pos[0] - self.last_pos[0])
self.compute_view()
if event.buttons() & Qt.MidButton:
self.fov += new_pos[0] - self.last_pos[0]
self.compute_view()
self.last_pos = new_pos
def keyPressEvent(self, event):
if event.key() == Qt.Key_Escape:
self.close()
def arcball_rotate(self, p1, p2):
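        # Arcball rotation: project both screen points onto a virtual unit
        # sphere (get_arcball_vector), then rotate the camera about the axis
        # perpendicular to the two vectors by the angle between them.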
a = self.get_arcball_vector(*p1)
b = self.get_arcball_vector(*p2)
angle = acos(min(1, np.dot(a, b)))
axis = np.cross(a, b)
self.cam_rotate = mmul(rotate_axis(angle * 180, *axis), self.cam_rotate)
def zoom_camera(self, delta):
self.r += delta
if self.r < 1:
self.r = 1
def get_arcball_vector(self, x, y):
p = np.array([(x / self.width * 2) - 1,
1 - (y / self.height * 2),
0])
op_squared = p[0] * p[0] + p[1] * p[1]
if op_squared <= 1:
p[2] = sqrt(1 - op_squared)
else:
p = normalized(p)
return p
def compute_view(self):
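        # Rotate a reference eye position on the +Z axis by the accumulated
        # arcball rotation, then look at the origin with +Y as the up vector.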
self.camera_pos = mmul(self.cam_rotate, np.array([0, 0, self.r / 100, 1]))
self.view = look_at(self.camera_pos[0], self.camera_pos[1], self.camera_pos[2],
0, 0, 0,
0, 1, 0)
if self.orthographic:
ar = self.width / self.height
s = self.r / 100
self.set_ortho(ar * s, s, -100, 100)
else:
self.set_perspective_fov(np.clip(self.fov, 1, 179), self.width / self.height, 0.1, 100)
self.update()
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
widget = GLWidget(None)
widget.show()
sys.exit(app.exec_())
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf.versioning import AsdfVersion
from astropy.modeling import mappings
from astropy.modeling import functional_models
from astropy.modeling.core import CompoundModel
from astropy.io.misc.asdf.types import AstropyAsdfType, AstropyType
from . import _parameter_to_value
__all__ = ['TransformType', 'IdentityType', 'ConstantType']
class TransformType(AstropyAsdfType):
version = '1.2.0'
requires = ['astropy']
@classmethod
def _from_tree_base_transform_members(cls, model, node, ctx):
if 'name' in node:
model.name = node['name']
if 'bounding_box' in node:
model.bounding_box = node['bounding_box']
if "inputs" in node:
if model.n_inputs == 1:
model.inputs = (node["inputs"],)
else:
model.inputs = tuple(node["inputs"])
if "outputs" in node:
if model.n_outputs == 1:
model.outputs = (node["outputs"],)
else:
model.outputs = tuple(node["outputs"])
param_and_model_constraints = {}
for constraint in ['fixed', 'bounds']:
if constraint in node:
param_and_model_constraints[constraint] = node[constraint]
model._initialize_constraints(param_and_model_constraints)
yield model
if 'inverse' in node:
model.inverse = node['inverse']
@classmethod
def from_tree_transform(cls, node, ctx):
raise NotImplementedError(
"Must be implemented in TransformType subclasses")
@classmethod
def from_tree(cls, node, ctx):
model = cls.from_tree_transform(node, ctx)
return cls._from_tree_base_transform_members(model, node, ctx)
@classmethod
def _to_tree_base_transform_members(cls, model, node, ctx):
if getattr(model, '_user_inverse', None) is not None:
node['inverse'] = model._user_inverse
if model.name is not None:
node['name'] = model.name
try:
bb = model.bounding_box
except NotImplementedError:
bb = None
if bb is not None:
if model.n_inputs == 1:
bb = list(bb)
else:
bb = [list(item) for item in model.bounding_box]
node['bounding_box'] = bb
if type(model.__class__.inputs) != property:
node['inputs'] = model.inputs
node['outputs'] = model.outputs
# model / parameter constraints
if not isinstance(model, CompoundModel):
fixed_nondefaults = {k: f for k, f in model.fixed.items() if f}
if fixed_nondefaults:
node['fixed'] = fixed_nondefaults
bounds_nondefaults = {k: b for k, b in model.bounds.items() if any(b)}
if bounds_nondefaults:
node['bounds'] = bounds_nondefaults
return node
@classmethod
def to_tree_transform(cls, model, ctx):
raise NotImplementedError("Must be implemented in TransformType subclasses")
@classmethod
def to_tree(cls, model, ctx):
node = cls.to_tree_transform(model, ctx)
return cls._to_tree_base_transform_members(model, node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
assert a.name == b.name
# TODO: Assert inverses are the same
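# Illustrative sketch (not part of the original module): these tag classes are
# exercised when an astropy model is written to and read back from an ASDF
# file. Assuming asdf and astropy are installed, a minimal round trip might
# look like this ('model.asdf' is an arbitrary file name):
def _example_roundtrip(path='model.asdf'):
    import asdf
    from astropy.modeling import models
    asdf.AsdfFile({'model': models.Const1D(amplitude=3.0)}).write_to(path)
    with asdf.open(path) as af:
        return af.tree['model']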
class IdentityType(TransformType):
name = "transform/identity"
types = ['astropy.modeling.mappings.Identity']
@classmethod
def from_tree_transform(cls, node, ctx):
return mappings.Identity(node.get('n_dims', 1))
@classmethod
def to_tree_transform(cls, data, ctx):
node = {}
if data.n_inputs != 1:
node['n_dims'] = data.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, mappings.Identity) and
isinstance(b, mappings.Identity) and
a.n_inputs == b.n_inputs)
class ConstantType(TransformType):
name = "transform/constant"
version = '1.4.0'
supported_versions = ['1.0.0', '1.1.0', '1.2.0', '1.3.0', '1.4.0']
types = ['astropy.modeling.functional_models.Const1D',
'astropy.modeling.functional_models.Const2D']
@classmethod
def from_tree_transform(cls, node, ctx):
if cls.version < AsdfVersion('1.4.0'):
# The 'dimensions' property was added in 1.4.0,
# previously all values were 1D.
return functional_models.Const1D(node['value'])
elif node['dimensions'] == 1:
return functional_models.Const1D(node['value'])
elif node['dimensions'] == 2:
return functional_models.Const2D(node['value'])
@classmethod
def to_tree_transform(cls, data, ctx):
if cls.version < AsdfVersion('1.4.0'):
if not isinstance(data, functional_models.Const1D):
raise ValueError(
f'constant-{cls.version} does not support models with > 1 dimension')
return {
'value': _parameter_to_value(data.amplitude)
}
else:
if isinstance(data, functional_models.Const1D):
dimension = 1
elif isinstance(data, functional_models.Const2D):
dimension = 2
return {
'value': _parameter_to_value(data.amplitude),
'dimensions': dimension
}
class GenericModel(mappings.Mapping):
def __init__(self, n_inputs, n_outputs):
mapping = tuple(range(n_inputs))
super().__init__(mapping)
self._n_outputs = n_outputs
self._outputs = tuple('x' + str(idx) for idx in range(n_outputs))
@property
def inverse(self):
raise NotImplementedError()
class GenericType(TransformType):
name = "transform/generic"
types = [GenericModel]
@classmethod
def from_tree_transform(cls, node, ctx):
return GenericModel(
node['n_inputs'], node['n_outputs'])
@classmethod
def to_tree_transform(cls, data, ctx):
return {
'n_inputs': data.n_inputs,
'n_outputs': data.n_outputs
}
class UnitsMappingType(AstropyType):
name = "transform/units_mapping"
version = "1.0.0"
types = [mappings.UnitsMapping]
@classmethod
def to_tree(cls, node, ctx):
tree = {}
if node.name is not None:
tree["name"] = node.name
inputs = []
outputs = []
for i, o, m in zip(node.inputs, node.outputs, node.mapping):
input = {
"name": i,
"allow_dimensionless": node.input_units_allow_dimensionless[i],
}
if m[0] is not None:
input["unit"] = m[0]
if node.input_units_equivalencies is not None and i in node.input_units_equivalencies:
input["equivalencies"] = node.input_units_equivalencies[i]
inputs.append(input)
output = {
"name": o,
}
if m[-1] is not None:
output["unit"] = m[-1]
outputs.append(output)
tree["inputs"] = inputs
tree["outputs"] = outputs
return tree
@classmethod
def from_tree(cls, tree, ctx):
mapping = tuple((i.get("unit"), o.get("unit"))
for i, o in zip(tree["inputs"], tree["outputs"]))
equivalencies = None
for i in tree["inputs"]:
if "equivalencies" in i:
if equivalencies is None:
equivalencies = {}
equivalencies[i["name"]] = i["equivalencies"]
kwargs = {
"input_units_equivalencies": equivalencies,
"input_units_allow_dimensionless": {
i["name"]: i.get("allow_dimensionless", False) for i in tree["inputs"]},
}
if "name" in tree:
kwargs["name"] = tree["name"]
return mappings.UnitsMapping(mapping, **kwargs)
# ------------------------------------------------------------------------
#
# Copyright 2005-2015 WSO2, Inc. (http://wso2.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
# ------------------------------------------------------------------------
import subprocess
import socket
import os
from plugins.contracts import ICartridgeAgentPlugin
from modules.util.log import LogFactory
from entity import *
from config import Config
class WSO2StartupHandler(ICartridgeAgentPlugin):
"""
Configures and starts the configurator and the Carbon server.
"""
log = LogFactory().get_log(__name__)
# class constants
CONST_PORT_MAPPINGS = "PORT_MAPPINGS"
CONST_APPLICATION_ID = "APPLICATION_ID"
CONST_MB_IP = "MB_IP"
CONST_SERVICE_NAME = "SERVICE_NAME"
CONST_CLUSTER_ID = "CLUSTER_ID"
CONST_WORKER = "worker"
CONST_MANAGER = "manager"
CONST_MGT = "mgt"
CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT = "mgt-http"
CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT = "mgt-https"
CONST_PROTOCOL_HTTP = "http"
CONST_PROTOCOL_HTTPS = "https"
CONST_PPAAS_MEMBERSHIP_SCHEME = "private-paas"
CONST_PRODUCT = "BRS"
SERVICES = ["wso2brs-210-manager", "wso2brs-210-worker"]
# list of environment variables exported by the plugin
ENV_CONFIG_PARAM_SUB_DOMAIN = 'CONFIG_PARAM_SUB_DOMAIN'
ENV_CONFIG_PARAM_MB_HOST = 'CONFIG_PARAM_MB_HOST'
ENV_CONFIG_PARAM_CLUSTER_IDs = 'CONFIG_PARAM_CLUSTER_IDs'
ENV_CONFIG_PARAM_HTTP_PROXY_PORT = 'CONFIG_PARAM_HTTP_PROXY_PORT'
ENV_CONFIG_PARAM_HTTPS_PROXY_PORT = 'CONFIG_PARAM_HTTPS_PROXY_PORT'
ENV_CONFIG_PARAM_HOST_NAME = 'CONFIG_PARAM_HOST_NAME'
ENV_CONFIG_PARAM_MGT_HOST_NAME = 'CONFIG_PARAM_MGT_HOST_NAME'
ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST = 'CONFIG_PARAM_LOCAL_MEMBER_HOST'
# clustering related environment variables read from payload_parameters
ENV_CONFIG_PARAM_CLUSTERING = 'CONFIG_PARAM_CLUSTERING'
ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME = 'CONFIG_PARAM_MEMBERSHIP_SCHEME'
def run_plugin(self, values):
# read from 'values'
port_mappings_str = values[self.CONST_PORT_MAPPINGS].replace("'", "")
app_id = values[self.CONST_APPLICATION_ID]
mb_ip = values[self.CONST_MB_IP]
service_type = values[self.CONST_SERVICE_NAME]
my_cluster_id = values[self.CONST_CLUSTER_ID]
clustering = values.get(self.ENV_CONFIG_PARAM_CLUSTERING, 'false')
membership_scheme = values.get(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME)
# read topology from PCA TopologyContext
topology = TopologyContext.topology
# log above values
WSO2StartupHandler.log.info("Port Mappings: %s" % port_mappings_str)
WSO2StartupHandler.log.info("Application ID: %s" % app_id)
WSO2StartupHandler.log.info("MB IP: %s" % mb_ip)
WSO2StartupHandler.log.info("Service Name: %s" % service_type)
WSO2StartupHandler.log.info("Cluster ID: %s" % my_cluster_id)
WSO2StartupHandler.log.info("Clustering: %s" % clustering)
WSO2StartupHandler.log.info("Membership Scheme: %s" % membership_scheme)
# export Proxy Ports as Env. variables - used in catalina-server.xml
mgt_http_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTP_TRANSPORT,
self.CONST_PROTOCOL_HTTP)
mgt_https_proxy_port = self.read_proxy_port(port_mappings_str, self.CONST_PORT_MAPPING_MGT_HTTPS_TRANSPORT,
self.CONST_PROTOCOL_HTTPS)
self.export_env_var(self.ENV_CONFIG_PARAM_HTTP_PROXY_PORT, mgt_http_proxy_port)
self.export_env_var(self.ENV_CONFIG_PARAM_HTTPS_PROXY_PORT, mgt_https_proxy_port)
# set sub-domain
sub_domain = None
if service_type.endswith(self.CONST_MANAGER):
sub_domain = self.CONST_MGT
elif service_type.endswith(self.CONST_WORKER):
sub_domain = self.CONST_WORKER
self.export_env_var(self.ENV_CONFIG_PARAM_SUB_DOMAIN, sub_domain)
# if CONFIG_PARAM_MEMBERSHIP_SCHEME is not set, use the private-paas membership scheme as the default
if clustering == 'true' and membership_scheme is None:
membership_scheme = self.CONST_PPAAS_MEMBERSHIP_SCHEME
self.export_env_var(self.ENV_CONFIG_PARAM_MEMBERSHIP_SCHEME, membership_scheme)
# check if clustering is enabled
if clustering == 'true':
# set hostnames
self.export_host_names(topology, app_id)
# check if membership scheme is set to 'private-paas'
if membership_scheme == self.CONST_PPAAS_MEMBERSHIP_SCHEME:
# export Cluster_Ids as Env. variables - used in axis2.xml
self.export_cluster_ids(topology, app_id, service_type, my_cluster_id)
# export mb_ip as Env.variable - used in jndi.properties
self.export_env_var(self.ENV_CONFIG_PARAM_MB_HOST, mb_ip)
# set instance private ip as CONFIG_PARAM_LOCAL_MEMBER_HOST
private_ip = self.get_member_private_ip(topology, Config.service_name, Config.cluster_id, Config.member_id)
self.export_env_var(self.ENV_CONFIG_PARAM_LOCAL_MEMBER_HOST, private_ip)
# start configurator
WSO2StartupHandler.log.info("Configuring WSO2 %s..." % self.CONST_PRODUCT)
config_command = "python ${CONFIGURATOR_HOME}/configurator.py"
env_var = os.environ.copy()
p = subprocess.Popen(config_command, env=env_var, shell=True)
output, errors = p.communicate()
WSO2StartupHandler.log.info("WSO2 %s configured successfully" % self.CONST_PRODUCT)
# start server
WSO2StartupHandler.log.info("Starting WSO2 %s ..." % self.CONST_PRODUCT)
if service_type.endswith(self.CONST_WORKER):
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -DworkerNode=true start"
else:
start_command = "exec ${CARBON_HOME}/bin/wso2server.sh -Dsetup start"
env_var = os.environ.copy()
p = subprocess.Popen(start_command, env=env_var, shell=True)
output, errors = p.communicate()
WSO2StartupHandler.log.info("WSO2 %s started successfully" % self.CONST_PRODUCT)
def get_member_private_ip(self, topology, service_name, cluster_id, member_id):
service = topology.get_service(service_name)
if service is None:
raise Exception("Service not found in topology [service] %s" % service_name)
cluster = service.get_cluster(cluster_id)
if cluster is None:
raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)
member = cluster.get_member(member_id)
if member is None:
raise Exception("Member id not found in topology [member] %s" % member_id)
if member.member_default_private_ip and not member.member_default_private_ip.isspace():
WSO2StartupHandler.log.info(
"Member private ip read from the topology: %s" % member.member_default_private_ip)
return member.member_default_private_ip
else:
local_ip = socket.gethostbyname(socket.gethostname())
WSO2StartupHandler.log.info(
"Member private ip not found in the topology. Reading from the socket interface: %s" % local_ip)
return local_ip
def export_host_names(self, topology, app_id):
"""
Sets the hostnames of the manager and worker services read from the topology and
exports them as CONFIG_PARAM_MGT_HOST_NAME and CONFIG_PARAM_HOST_NAME.
:return: void
"""
mgt_host_name = None
host_name = None
for service_name in self.SERVICES:
if service_name.endswith(self.CONST_MANAGER):
mgr_cluster = self.get_cluster_of_service(topology, service_name, app_id)
if mgr_cluster is not None:
mgt_host_name = mgr_cluster.hostnames[0]
elif service_name.endswith(self.CONST_WORKER):
worker_cluster = self.get_cluster_of_service(topology, service_name, app_id)
if worker_cluster is not None:
host_name = worker_cluster.hostnames[0]
self.export_env_var(self.ENV_CONFIG_PARAM_MGT_HOST_NAME, mgt_host_name)
self.export_env_var(self.ENV_CONFIG_PARAM_HOST_NAME, host_name)
def export_cluster_ids(self, topology, app_id, service_type, my_cluster_id):
"""
Sets the cluster ids of the manager and worker services read from the topology;
falls back to this instance's own cluster id when a service cluster is not found.
:return: void
"""
cluster_ids = []
cluster_id_of_service = None
if service_type.endswith(self.CONST_MANAGER) or service_type.endswith(self.CONST_WORKER):
for service_name in self.SERVICES:
cluster_of_service = self.get_cluster_of_service(topology, service_name, app_id)
if cluster_of_service is not None:
cluster_id_of_service = cluster_of_service.cluster_id
if cluster_id_of_service is not None:
cluster_ids.append(cluster_id_of_service)
else:
cluster_ids.append(my_cluster_id)
# If clusterIds are available, export them as environment variables
if cluster_ids:
cluster_ids_string = ",".join(cluster_ids)
self.export_env_var(self.ENV_CONFIG_PARAM_CLUSTER_IDs, cluster_ids_string)
@staticmethod
def get_cluster_of_service(topology, service_name, app_id):
cluster_obj = None
clusters = None
if topology is not None:
if topology.service_exists(service_name):
service = topology.get_service(service_name)
if service is not None:
clusters = service.get_clusters()
else:
WSO2StartupHandler.log.warn("[Service] %s is None" % service_name)
else:
WSO2StartupHandler.log.warn("[Service] %s is not available in topology" % service_name)
else:
WSO2StartupHandler.log.warn("Topology is empty.")
if clusters is not None:
for cluster in clusters:
if cluster.app_id == app_id:
cluster_obj = cluster
return cluster_obj
@staticmethod
def read_proxy_port(port_mappings_str, port_mapping_name, port_mapping_protocol):
"""
Returns the proxy port of the requested port mapping.
:return: proxy port as a string, or None if no matching mapping is found
"""
# port mappings format: NAME:mgt-http|PROTOCOL:http|PORT:30001|PROXY_PORT:0|TYPE:NodePort;
# NAME:mgt-https|PROTOCOL:https|PORT:30002|PROXY_PORT:0|TYPE:NodePort;
# NAME:pt-http|PROTOCOL:http|PORT:30003|PROXY_PORT:7280|TYPE:ClientIP;
# NAME:pt-https|PROTOCOL:https|PORT:30004|PROXY_PORT:7243|TYPE:NodePort
if port_mappings_str is not None:
port_mappings_array = port_mappings_str.split(";")
if port_mappings_array:
for port_mapping in port_mappings_array:
# WSO2StartupHandler.log.debug("port_mapping: %s" % port_mapping)
name_value_array = port_mapping.split("|")
name = name_value_array[0].split(":")[1]
protocol = name_value_array[1].split(":")[1]
proxy_port = name_value_array[3].split(":")[1]
# If PROXY_PORT is not set (e.g. on Kubernetes), fall back to PORT as the proxy port
if proxy_port == '0':
proxy_port = name_value_array[2].split(":")[1]
if name == port_mapping_name and protocol == port_mapping_protocol:
return proxy_port
@staticmethod
def export_env_var(variable, value):
"""
Exports the given key-value pair as an environment variable.
:return: void
"""
if value is not None:
os.environ[variable] = value
WSO2StartupHandler.log.info("Exported environment variable %s: %s" % (variable, value))
else:
WSO2StartupHandler.log.warn("Could not export environment variable %s " % variable)
|
|
#!/usr/bin/env python
import uuid
import json
from time import time
import gevent
from gevent import event as gevent_event
from pyon.public import log
from pyon.core.exception import NotFound, BadRequest, ServerError
from pyon.util.containers import create_valid_identifier
from pyon.ion.event import EventPublisher, EventSubscriber
from interface.services.core.iprocess_dispatcher_service import BaseProcessDispatcherService
from interface.objects import ProcessStateEnum, Process
class ProcessStateGate(EventSubscriber):
"""
Ensure that we get a particular state, now or in the future.
Usage:
gate = ProcessStateGate(your_process_dispatcher_client.read_process, process_id, ProcessStateEnum.some_state)
assert gate.await(timeout_in_seconds)
This pattern returns True immediately upon reaching the desired state, or False if the timeout is reached.
This pattern avoids a race condition between read_process and using EventGate.
"""
def __init__(self, read_process_fn=None, process_id='', desired_state=None, *args, **kwargs):
if not process_id:
raise BadRequest("ProcessStateGate trying to wait on invalid process (id = '%s')" % process_id)
EventSubscriber.__init__(self, *args,
callback=self.trigger_cb,
event_type="ProcessLifecycleEvent",
origin=process_id,
origin_type="DispatchedProcess",
**kwargs)
self.desired_state = desired_state
self.process_id = process_id
self.read_process_fn = read_process_fn
self.last_chance = None
self.first_chance = None
_ = ProcessStateEnum._str_map[self.desired_state] # make sure state exists
log.info("ProcessStateGate is going to wait on process '%s' for state '%s'",
self.process_id,
ProcessStateEnum._str_map[self.desired_state])
def trigger_cb(self, event, x):
if event.state == self.desired_state:
self.gate.set()
else:
log.info("ProcessStateGate received an event for state %s, wanted %s",
ProcessStateEnum._str_map[event.state],
ProcessStateEnum._str_map[self.desired_state])
log.info("ProcessStateGate received (also) variable x = %s", x)
def in_desired_state(self):
# check whether the process we are monitoring is in the desired state as of this moment
# Once pd creates the process, process_obj is never None
try:
process_obj = self.read_process_fn(self.process_id)
return (process_obj and self.desired_state == process_obj.process_state)
except NotFound:
return False
def await(self, timeout=0):
#set up the event gate so that we don't miss any events
start_time = time()
self.gate = gevent_event.Event()
self.start()
#if it's in the desired state, return immediately
if self.in_desired_state():
self.first_chance = True
self.stop()
log.info("ProcessStateGate found process already %s -- NO WAITING",
ProcessStateEnum._str_map[self.desired_state])
return True
#if the state was not where we want it, wait for the event.
ret = self.gate.wait(timeout)
self.stop()
if ret:
# timer is already stopped in this case
log.info("ProcessStateGate received %s event after %0.2f seconds",
ProcessStateEnum._str_map[self.desired_state],
time() - start_time)
else:
log.info("ProcessStateGate timed out waiting to receive %s event",
ProcessStateEnum._str_map[self.desired_state])
# sanity check for this pattern
self.last_chance = self.in_desired_state()
if self.last_chance:
log.warn("ProcessStateGate was successful reading %s on last_chance; " +
"should the state change for '%s' have taken %s seconds exactly?",
ProcessStateEnum._str_map[self.desired_state],
self.process_id,
timeout)
return ret or self.last_chance
def _get_last_chance(self):
return self.last_chance
def _get_first_chance(self):
return self.first_chance
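# --- Illustrative sketch (not part of the original service code) ---
# A hypothetical helper mirroring the usage pattern shown in the ProcessStateGate
# docstring; 'pd_client' stands in for any client exposing read_process().
def _example_wait_for_running(pd_client, process_id, timeout=30):
    gate = ProcessStateGate(pd_client.read_process, process_id,
                            ProcessStateEnum.RUNNING)
    return gate.await(timeout)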
class ProcessDispatcherService(BaseProcessDispatcherService):
# local container mode - spawn directly in the local container
# without going through any external functionality. This is
# the default mode.
def on_init(self):
self.backend = PDLocalBackend(self.container)
def on_start(self):
self.backend.initialize()
def on_quit(self):
self.backend.shutdown()
def create_process_definition(self, process_definition=None, process_definition_id=None):
"""Creates a Process Definition based on given object.
@param process_definition ProcessDefinition
@param process_definition_id desired process definition ID
@retval process_definition_id str
@throws BadRequest if object passed has _id or _rev attribute
"""
if not (process_definition.module and process_definition.class_name):
raise BadRequest("process definition must have module and class")
return self.backend.create_definition(process_definition, process_definition_id)
def read_process_definition(self, process_definition_id=''):
"""Returns a Process Definition as object.
@param process_definition_id str
@retval process_definition ProcessDefinition
@throws NotFound object with specified id does not exist
"""
return self.backend.read_definition(process_definition_id)
def delete_process_definition(self, process_definition_id=''):
"""Deletes/retires a Process Definition.
@param process_definition_id str
@throws NotFound object with specified id does not exist
"""
self.backend.delete_definition(process_definition_id)
def create_process(self, process_definition_id=''):
"""Create a process resource and process id. Does not yet start the process
@param process_definition_id str
@retval process_id str
@throws NotFound object with specified id does not exist
"""
if not process_definition_id:
raise NotFound('No process definition was provided')
process_definition = self.backend.read_definition(process_definition_id)
# try to get a unique but still descriptive name
process_id = str(process_definition.name or "process") + uuid.uuid4().hex
process_id = create_valid_identifier(process_id, ws_sub='_')
self.backend.create(process_id, process_definition_id)
try:
process = Process(process_id=process_id)
self.container.resource_registry.create(process, object_id=process_id)
except BadRequest:
log.debug("Tried to create Process %s, but already exists. This is normally ok.", process_id)
return process_id
def schedule_process(self, process_definition_id='', schedule=None, configuration=None, process_id='', name=''):
"""Schedule a process definition for execution on an Execution Engine. If no process id is given,
a new unique ID is generated.
@param process_definition_id str
@param schedule ProcessSchedule
@param configuration dict containing the process configuration
@param process_id str
@retval process_id str
@throws BadRequest if object passed has _id or _rev attribute
@throws NotFound object with specified id does not exist
"""
if not process_definition_id:
raise NotFound('No process definition was provided')
process_definition = self.backend.read_definition(process_definition_id)
if configuration is None:
configuration = {}
else:
# push the config through a JSON serializer to ensure that the same
# config would work with the bridge backend
try:
json.dumps(configuration)
except TypeError, e:
raise BadRequest("bad configuration: " + str(e))
# If not provided, create a unique but still descriptive (valid) id
if not process_id:
process_id = str(process_definition.name or "process") + uuid.uuid4().hex
process_id = create_valid_identifier(process_id, ws_sub='_')
# If not provided, create a unique but still descriptive (valid) name
if not name:
name = self._get_process_name(process_definition, configuration)
try:
process = Process(process_id=process_id, name=name)
self.container.resource_registry.create(process, object_id=process_id)
except BadRequest:
log.debug("Tried to create Process %s, but already exists. This is normally ok.",
process_id)
return self.backend.schedule(process_id, process_definition_id,
schedule, configuration, name)
def cancel_process(self, process_id=''):
"""Cancels the execution of the given process id.
@param process_id str
@retval success bool
@throws NotFound object with specified id does not exist
"""
if not process_id:
raise NotFound('No process was provided')
cancel_result = self.backend.cancel(process_id)
return cancel_result
def read_process(self, process_id=''):
"""Returns a Process as an object.
@param process_id str
@retval process Process
@throws NotFound object with specified id does not exist
"""
if not process_id:
raise NotFound('No process was provided')
return self.backend.read_process(process_id)
def list_processes(self):
"""Lists managed processes
@retval processes list
"""
return self.backend.list()
def _get_process_name(self, process_definition, configuration):
base_name = ""
name_suffix = ""
ha_config = configuration.get('highavailability')
if ha_config:
if ha_config.get('process_definition_name'):
base_name = ha_config['process_definition_name']
name_suffix = "ha"
elif ha_config.get('process_definition_id'):
inner_definition = self.backend.read_definition(
ha_config['process_definition_id'])
base_name = inner_definition.name
name_suffix = "ha"
name_parts = [str(base_name or process_definition.name or "process")]
if name_suffix:
name_parts.append(name_suffix)
name_parts.append(uuid.uuid4().hex)
name = '-'.join(name_parts)
return name
class PDLocalBackend(object):
"""Scheduling backend to PD that manages processes in the local container
This implementation is the default and is used in single-container
deployments where there is no CEI launch to leverage.
"""
# We attempt to make the local backend act a bit more like the real thing.
# Process spawn requests are asynchronous (not completed by the time the
# operation returns). Therefore, callers need to listen for events to find
# the success or failure of the process launch. To make races here more
# detectable, we introduce an artificial delay between when
# schedule_process() returns and when the process is actually launched.
SPAWN_DELAY = 0
def __init__(self, container):
self.container = container
self.event_pub = EventPublisher()
self._processes = []
self._spawn_greenlets = set()
# use the container RR instance -- talks directly to db
self.rr = container.resource_registry
def initialize(self):
pass
def shutdown(self):
if self._spawn_greenlets:
try:
gevent.killall(list(self._spawn_greenlets), block=True)
except Exception:
log.warn("Ignoring error while killing spawn greenlets", exc_info=True)
self._spawn_greenlets.clear()
def set_system_boot(self, system_boot):
pass
def create_definition(self, definition, definition_id=None):
pd_id, version = self.rr.create(definition, object_id=definition_id)
return pd_id
def read_definition(self, definition_id):
return self.rr.read(definition_id)
def read_definition_by_name(self, definition_name):
raise ServerError("reading process definitions by name not supported by this backend")
def update_definition(self, definition, definition_id):
raise ServerError("updating process definitions not supported by this backend")
def delete_definition(self, definition_id):
return self.rr.delete(definition_id)
def create(self, process_id, definition_id):
if not self._get_process(process_id):
self._add_process(process_id, {}, ProcessStateEnum.REQUESTED)
return process_id
def schedule(self, process_id, definition_id, schedule, configuration, name):
definition = self.read_definition(definition_id)
process = self._get_process(process_id)
# in order for this local backend to behave more like the real thing,
# we introduce an artificial delay in spawn requests. This helps flush
# out races where callers try to use a process before it is necessarily
# running.
if self.SPAWN_DELAY:
glet = gevent.spawn_later(self.SPAWN_DELAY, self._inner_spawn,
process_id, name, definition, schedule, configuration)
self._spawn_greenlets.add(glet)
if process:
process.process_configuration = configuration
else:
self._add_process(process_id, configuration, None)
else:
if process:
process.process_configuration = configuration
else:
self._add_process(process_id, configuration, None)
self._inner_spawn(process_id, name, definition, schedule, configuration)
return process_id
def _inner_spawn(self, process_id, process_name, definition, schedule, configuration):
name = process_name
module = definition.module
cls = definition.class_name
self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
origin=process_id, origin_type="DispatchedProcess",
state=ProcessStateEnum.PENDING)
# Spawn the process
pid = self.container.spawn_process(name=name, module=module, cls=cls,
config=configuration, process_id=process_id)
log.debug('PD: Spawned Process (%s)', pid)
# update state on the existing process
process = self._get_process(process_id)
process.process_state = ProcessStateEnum.RUNNING
self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
origin=process_id, origin_type="DispatchedProcess",
state=ProcessStateEnum.RUNNING)
if self.SPAWN_DELAY:
glet = gevent.getcurrent()
if glet:
self._spawn_greenlets.discard(glet)
return pid
def cancel(self, process_id):
process = self._get_process(process_id)
if process:
try:
self.container.proc_manager.terminate_process(process_id)
log.debug('PD: Terminated Process (%s)', process_id)
except BadRequest, e:
log.warn("PD: Failed to terminate process %s in container. already dead?: %s",
process_id, str(e))
process.process_state = ProcessStateEnum.TERMINATED
try:
self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
origin=process_id, origin_type="DispatchedProcess",
state=ProcessStateEnum.TERMINATED)
except BadRequest, e:
log.warn(e)
else:
raise NotFound("process %s unknown" % (process_id,))
return True
def read_process(self, process_id):
process = self._get_process(process_id)
if process is None:
raise NotFound("process %s unknown" % process_id)
return process
def _add_process(self, pid, config, state):
proc = Process(process_id=pid, process_state=state,
process_configuration=config)
self._processes.append(proc)
def _remove_process(self, pid):
self._processes = filter(lambda u: u.process_id != pid, self._processes)
def _get_process(self, pid):
wanted_procs = filter(lambda u: u.process_id == pid, self._processes)
if len(wanted_procs) >= 1:
return wanted_procs[0]
else:
return None
def list(self):
return self._processes
# map from internal PD states to external ProcessStateEnum values
_PD_PROCESS_STATE_MAP = {
"100-UNSCHEDULED": ProcessStateEnum.REQUESTED,
"150-UNSCHEDULED_PENDING": ProcessStateEnum.REQUESTED,
"200-REQUESTED": ProcessStateEnum.REQUESTED,
"250-DIED_REQUESTED": ProcessStateEnum.REQUESTED,
"300-WAITING": ProcessStateEnum.WAITING,
"350-ASSIGNED": ProcessStateEnum.PENDING,
"400-PENDING": ProcessStateEnum.PENDING,
"500-RUNNING": ProcessStateEnum.RUNNING,
"600-TERMINATING": ProcessStateEnum.TERMINATING,
"700-TERMINATED": ProcessStateEnum.TERMINATED,
"800-EXITED": ProcessStateEnum.EXITED,
"850-FAILED": ProcessStateEnum.FAILED,
"900-REJECTED": ProcessStateEnum.REJECTED
}
_PD_PYON_PROCESS_STATE_MAP = {
ProcessStateEnum.REQUESTED: "200-REQUESTED",
ProcessStateEnum.WAITING: "300-WAITING",
ProcessStateEnum.PENDING: "400-PENDING",
ProcessStateEnum.RUNNING: "500-RUNNING",
ProcessStateEnum.TERMINATING: "600-TERMINATING",
ProcessStateEnum.TERMINATED: "700-TERMINATED",
ProcessStateEnum.EXITED: "800-EXITED",
ProcessStateEnum.FAILED: "850-FAILED",
ProcessStateEnum.REJECTED: "900-REJECTED"
}
def process_state_to_pd_core(process_state):
return _PD_PYON_PROCESS_STATE_MAP[process_state]
def process_state_from_pd_core(core_process_state):
return _PD_PROCESS_STATE_MAP[core_process_state]
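# --- Illustrative sketch (not part of the original module) ---
# Quick demonstration of the two helpers above; the expected values come
# directly from the dictionaries they wrap.
def _example_state_mapping():
    assert process_state_to_pd_core(ProcessStateEnum.RUNNING) == "500-RUNNING"
    assert process_state_from_pd_core("700-TERMINATED") == ProcessStateEnum.TERMINATED
    # several internal PD states collapse onto one external state:
    assert process_state_from_pd_core("250-DIED_REQUESTED") == ProcessStateEnum.REQUESTED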
class Notifier(object):
"""Sends Process state notifications via ION events
This object is fed into the internal PD core classes
"""
def __init__(self):
self.event_pub = EventPublisher()
def notify_process(self, process):
process_id = process.upid
state = process.state
ion_process_state = _PD_PROCESS_STATE_MAP.get(state)
if not ion_process_state:
log.debug("Received unknown process state from Process Dispatcher." +
" process=%s state=%s", process_id, state)
return
log.debug("Emitting event for process state. process=%s state=%s", process_id, ion_process_state)
try:
self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
origin=process_id, origin_type="DispatchedProcess",
state=ion_process_state)
except Exception:
log.exception("Problem emitting event for process state. process=%s state=%s",
process_id, ion_process_state)
# should be configurable to support multiple process dispatchers?
DEFAULT_HEARTBEAT_QUEUE = "heartbeats"
|
|
"""
blog.test_views.py
==================
Test Views for Blog App
"""
import logging
import datetime
from django.core.urlresolvers import reverse
from django_dynamic_fixture import G
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from accounts.models import AccountsUser
from blog.models import Post
from blog.serializers import PostSerializer
logger = logging.getLogger('test_logger')
class TestPostList(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.superadmin = G(AccountsUser, is_superuser=True, is_staff=True)
self.superadmin_not_staff = G(AccountsUser, is_superuser=True, is_staff=False)
self.client = APIClient()
self.url = reverse('blog:list')
def test_create_post_pass_permissions_superadmin(self):
""" Test creation of post for superadmin. """
# Create a Post, then change the slug (it has to be unique) so its data can be reused for the POST request
post = G(Post, author=self.user)
count = Post.objects.count()
post.slug = 'different-slug'
serializer = PostSerializer(post)
# Force Authentication and Post
self.client.force_authenticate(user=self.superadmin)
response = self.client.post(self.url, serializer.data, format='json')
# Basic check: slug is the same, created & object count increased
self.assertEquals(response.status_code, status.HTTP_201_CREATED, "%s" % response.data)
self.assertEquals(Post.objects.count(), count + 1)
self.assertEquals(response.data['slug'], post.slug, response.data)
def test_create_post_pass_permissions_staff(self):
""" Test create permissions for staff. """
# Permission tests don't care about the data, so just generate it first
post = G(Post, author=self.user)
count = Post.objects.count()
post.slug = 'different-slug'
serializer = PostSerializer(post)
# Force Authentication and Post
self.client.force_authenticate(user=self.staff)
response = self.client.post(self.url, serializer.data, format='json')
# Basic check: slug is the same, created & object count increased
self.assertEquals(response.status_code, status.HTTP_201_CREATED)
self.assertEquals(Post.objects.count(), count + 1)
self.assertEquals(response.data['slug'], post.slug)
def test_create_post_pass_permissions_superadmin_not_staff(self):
""" Test create permissions for a superadmin who is not staff. """
# Permission tests don't care about the data, so just generate it first
post = G(Post, author=self.user)
count = Post.objects.count()
post.slug = 'different-slug'
serializer = PostSerializer(post)
# Force Authentication and Post
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.post(self.url, serializer.data, format='json')
# Basic check: slug is the same, created & object count increased
self.assertEquals(response.status_code, status.HTTP_201_CREATED)
self.assertEquals(Post.objects.count(), count + 1)
self.assertEquals(response.data['slug'], post.slug)
def test_create_post_fail_permissions_user(self):
""" Test create permissions fail for authenticated users - posts can only be created by staff/superadmin. """
# Permission tests don't care about the data, so just generate it first
post = G(Post, author=self.user)
count = Post.objects.count()
serializer = PostSerializer(post)
# Force Authentication and Post
self.client.force_authenticate(user=self.user)
response = self.client.post(self.url, serializer.data, format='json')
# Basic check: request is forbidden, object count unchanged, slug untouched
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEquals(Post.objects.count(), count)
self.assertEquals(Post.objects.get(pk=post.pk).slug, post.slug)
def test_get_published_posts_anonymous_user(self):
""" Tests getting a list of published posts only for anonymous users. """
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=False)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 3, "The list of posts retrieved should only include published ")
def test_get_published_posts_normal_authenticated_user(self):
""" Tests getting a list of published posts only for authenticated users. """
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=False)
self.client.force_authenticate(user=self.user)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 3, "The list of posts retrieved should only include published ")
def test_get_all_posts_superadmin(self):
""" Test getting a list of all posts for superadmins. """
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=False)
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 4, "The method should retrieve all posts (published & not published).")
def test_get_all_posts_staff(self):
""" Tests getting a list of all posts for staff users. """
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=False)
self.client.force_authenticate(user=self.staff)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 4, "The method should retrieve all posts (published & not published).")
class TestPostListByYear(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.superadmin = G(AccountsUser, is_superuser=True, is_staff=True)
self.superadmin_not_staff = G(AccountsUser, is_superuser=True, is_staff=False)
self.client = APIClient()
self.year = "2015"
self.url = reverse('blog:list_year', kwargs={'year': self.year})
def test_post_posts_forbidden_normal_user(self):
""" Test post action is forbidden for an normal user. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
# Force Authentication and Post
self.client.force_authenticate(user=self.user)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
response = self.client.post(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_put_posts_forbidden(self):
""" Test all posts are retrieved for anonymous user. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
response = self.client.put(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_get_published_posts_by_year(self):
""" Test published posts are retrieved. """
G(Post, author=self.user, published=False, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 2)
class TestPostListByUser(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.client = APIClient()
self.url = reverse('blog:list_user', kwargs={'user': self.user})
def test_posts_patch_method_not_allowed(self):
""" Tests list_user is not allowed for patch method. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.patch(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_posts_post_method_not_allowed(self):
""" Tests list_user is not allowed for post method. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.post(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_posts_put_method_not_allowed(self):
""" Tests list_user is not allowed for put method. """
G(Post, author=self.user, published=True, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.put(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_get_posts_live_by_user_staff(self):
""" Test all posts for a specific author are returned for staff. """
G(Post, author=self.user, published=False, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
self.client.force_authenticate(user=self.staff)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 3)
def test_get_posts_by_user(self):
""" Test published posts for a specific author are returned for anonymous users. """
G(Post, author=self.user, published=False, updated_at=datetime.date(2014, 3, 13))
G(Post, author=self.user, published=True)
G(Post, author=self.user, published=True)
logger.info("%s" % self.url)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 2)
class TestPostListByTag(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.tag = 'tag1'
self.client = APIClient()
self.url = reverse('blog:list_tag', kwargs={'tag': self.tag})
def test_posts_patch_method_not_allowed(self):
""" Tests list_tag is not allowed for patch method. """
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.patch(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_post_post_method_not_allowed(self):
""" Tests list_tag is not allowed for post method. """
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.post(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_posts_put_method_not_allowed(self):
""" Tests list_tag is not allowed for put method. """
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True)
post = G(Post, author=self.user)
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.put(self.url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_get_posts_live_by_tag_staff(self):
""" Test all posts for a specific author are returned for staff. """
G(Post, author=self.user, published=False, tags=[self.tag])
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True, tags=[self.tag])
self.client.force_authenticate(user=self.staff)
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 3)
def test_get_posts_by_tag(self):
""" Test published posts for a specific author are returned for anonymous users. """
G(Post, author=self.user, published=False, tags=[self.tag])
G(Post, author=self.user, published=True, tags=[self.tag])
G(Post, author=self.user, published=True, tags=[self.tag])
response = self.client.get(self.url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(len(response.data), 2)
class TestPostDetail(APITestCase):
def setUp(self):
self.user = G(AccountsUser, is_superuser=False, is_staff=False)
self.staff = G(AccountsUser, is_superuser=False, is_staff=True)
self.superadmin = G(AccountsUser, is_superuser=True, is_staff=True)
self.superadmin_not_staff = G(AccountsUser, is_superuser=True, is_staff=False)
self.client = APIClient()
def test_patch_fail_post_user(self):
""" Tests patch method is forbidden for a normal user. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
slug = 'patching'
self.client.force_authenticate(user=self.user)
response = self.client.patch(url, {'slug': slug}, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_patch_post_staff(self):
""" Test patch method for staff is successful. """
post = G(Post, author=self.user, published=True)
slug = post.slug
logger.info("%s" % slug)
url = reverse('blog:detail', kwargs={'slug': slug})
slug = 'patching'
self.client.force_authenticate(user=self.staff)
response = self.client.patch(url, {'slug': slug}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data['slug'], slug)
def test_patch_post_superadmin(self):
""" Test patch method for superadmin is successful. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
slug = 'patching'
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.patch(url, {'slug': slug}, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data['slug'], slug)
def test_put_post_superadmin(self):
""" Test put method is successful for superadmin . """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
post.slug = 'putting'
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.put(url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data['slug'], serializer.data['slug'])
def test_put_post_staff(self):
""" Test put method is successful for staff. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
post.slug = 'putting'
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.put(url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data['slug'], serializer.data['slug'])
def test_put_fail_not_published_post_user(self):
""" Test put method fails for normal user on non published post. """
post = G(Post, author=self.user, published=False)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
logger.info("fdsfdsfd")
self.client.force_authenticate(user=None)
self.client.force_authenticate(user=self.user)
response = self.client.put(url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_put_fail_published_post_user(self):
""" Test put method fails for normal user on published post. """
post = G(Post, author=self.user, published=True)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
self.client.force_authenticate(user=None)
self.client.force_authenticate(user=self.user)
response = self.client.put(url, serializer.data, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_delete_fail_post_user(self):
""" Test delete method fails for authenticated users. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
self.client.force_authenticate(user=self.user)
response = self.client.delete(url, format='json')
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_delete_post_staff(self):
""" Test delete method is successful for staff. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
self.client.force_authenticate(user=self.staff)
response = self.client.delete(url, format='json')
self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)
def test_delete_post_superadmin(self):
""" Test delete method is successful for superadmin. """
post = G(Post, author=self.user)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.delete(url, format='json')
self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)
def test_get_post_anonymous_user(self):
""" Test get method is successful for an anonymous user. """
post = G(Post, author=self.user, published=True)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
response = self.client.get(url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, serializer.data)
def test_get_post_404_for_non_published_anonymous_user(self):
"""
Test that get only returns published posts for an anonymous user:
create a non-published post -> get it -> 404.
"""
post = G(Post, author=self.user, published=False)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
response = self.client.get(url, format='json')
self.assertEquals(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_post_not_published_staff(self):
""" Test get method on non published post by staff is successful. """
post = G(Post, author=self.user, published=False)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.staff)
response = self.client.get(url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, serializer.data)
def test_get_post_not_published_superadmin(self):
""" Test get method on non published post by superadmin is successful. """
post = G(Post, author=self.user, published=False)
slug = post.slug
url = reverse('blog:detail', kwargs={'slug': slug})
serializer = PostSerializer(post)
self.client.force_authenticate(user=self.superadmin_not_staff)
response = self.client.get(url, format='json')
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, serializer.data)
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, unicode_literals
import datetime
import unittest
from airflow import configuration, DAG
from airflow.models import TaskInstance as TI
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.settings import Session
from airflow.utils.state import State
from airflow.exceptions import AirflowException
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
END_DATE = datetime.datetime(2016, 1, 2)
INTERVAL = datetime.timedelta(hours=12)
FROZEN_NOW = datetime.datetime(2016, 1, 2, 12, 1, 1)
class PythonOperatorTest(unittest.TestCase):
def setUp(self):
super(PythonOperatorTest, self).setUp()
configuration.load_test_config()
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE},
schedule_interval=INTERVAL)
self.addCleanup(self.dag.clear)
self.clear_run()
self.addCleanup(self.clear_run)
def do_run(self):
self.run = True
def clear_run(self):
self.run = False
def is_run(self):
return self.run
def test_python_operator_run(self):
"""Tests that the python callable is invoked on task run."""
task = PythonOperator(
python_callable=self.do_run,
task_id='python_operator',
dag=self.dag)
self.assertFalse(self.is_run())
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
self.assertTrue(self.is_run())
def test_python_operator_python_callable_is_callable(self):
"""Tests that PythonOperator will only instantiate if
the python_callable argument is callable."""
not_callable = {}
with self.assertRaises(AirflowException):
PythonOperator(
python_callable=not_callable,
task_id='python_operator',
dag=self.dag)
not_callable = None
with self.assertRaises(AirflowException):
PythonOperator(
python_callable=not_callable,
task_id='python_operator',
dag=self.dag)
class BranchOperatorTest(unittest.TestCase):
def setUp(self):
self.dag = DAG('branch_operator_test',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE},
schedule_interval=INTERVAL)
self.branch_op = BranchPythonOperator(task_id='make_choice',
dag=self.dag,
python_callable=lambda: 'branch_1')
self.branch_1 = DummyOperator(task_id='branch_1', dag=self.dag)
self.branch_1.set_upstream(self.branch_op)
self.branch_2 = DummyOperator(task_id='branch_2', dag=self.dag)
self.branch_2.set_upstream(self.branch_op)
self.dag.clear()
def test_without_dag_run(self):
"""This checks the defensive against non existent tasks in a dag run"""
self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
session = Session()
tis = session.query(TI).filter(
TI.dag_id == self.dag.dag_id,
TI.execution_date == DEFAULT_DATE
)
session.close()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEquals(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
# should not exist
raise
elif ti.task_id == 'branch_2':
self.assertEquals(ti.state, State.SKIPPED)
else:
raise
def test_with_dag_run(self):
dr = self.dag.create_dagrun(
run_id="manual__",
start_date=datetime.datetime.now(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
self.branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEquals(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEquals(ti.state, State.NONE)
elif ti.task_id == 'branch_2':
self.assertEquals(ti.state, State.SKIPPED)
else:
raise
class ShortCircuitOperatorTest(unittest.TestCase):
def setUp(self):
self.dag = DAG('shortcircuit_operator_test',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE},
schedule_interval=INTERVAL)
self.short_op = ShortCircuitOperator(task_id='make_choice',
dag=self.dag,
python_callable=lambda: self.value)
self.branch_1 = DummyOperator(task_id='branch_1', dag=self.dag)
self.branch_1.set_upstream(self.short_op)
self.upstream = DummyOperator(task_id='upstream', dag=self.dag)
self.upstream.set_downstream(self.short_op)
self.dag.clear()
self.value = True
def test_without_dag_run(self):
"""This checks the defensive against non existent tasks in a dag run"""
self.value = False
self.short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
session = Session()
tis = session.query(TI).filter(
TI.dag_id == self.dag.dag_id,
TI.execution_date == DEFAULT_DATE
)
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEquals(ti.state, State.SUCCESS)
elif ti.task_id == 'upstream':
# should not exist
raise
elif ti.task_id == 'branch_1':
self.assertEquals(ti.state, State.SKIPPED)
else:
raise
self.value = True
self.dag.clear()
self.short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEquals(ti.state, State.SUCCESS)
elif ti.task_id == 'upstream':
# should not exist
raise
elif ti.task_id == 'branch_1':
self.assertEquals(ti.state, State.NONE)
else:
raise
session.close()
def test_with_dag_run(self):
self.value = False
dr = self.dag.create_dagrun(
run_id="manual__",
start_date=datetime.datetime.now(),
execution_date=DEFAULT_DATE,
state=State.RUNNING
)
self.upstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
self.short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEquals(ti.state, State.SUCCESS)
elif ti.task_id == 'upstream':
self.assertEquals(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEquals(ti.state, State.SKIPPED)
else:
raise
self.value = True
self.dag.clear()
self.upstream.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
self.short_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == 'make_choice':
self.assertEquals(ti.state, State.SUCCESS)
elif ti.task_id == 'upstream':
self.assertEquals(ti.state, State.SUCCESS)
elif ti.task_id == 'branch_1':
self.assertEquals(ti.state, State.NONE)
else:
raise
|
|
"""Support for LG TV running on NetCast 3 or 4."""
from datetime import timedelta
import logging
from pylgnetcast import LgNetCastClient, LgNetCastError
from requests import RequestException
import voluptuous as vol
from homeassistant import util
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_HOST,
CONF_NAME,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "LG TV Remote"
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
SUPPORT_LGTV = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_ACCESS_TOKEN): vol.All(cv.string, vol.Length(max=6)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
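# --- Illustrative sketch (not part of the original platform) ---
# A hypothetical configuration mapping of the shape setup_platform() reads with
# config.get(); the host, access token and name values below are invented.
EXAMPLE_CONFIG = {
    CONF_HOST: "192.168.1.20",
    CONF_ACCESS_TOKEN: "123456",  # optional; the schema caps it at 6 characters
    CONF_NAME: "Living Room TV",
}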
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the LG TV platform."""
host = config.get(CONF_HOST)
access_token = config.get(CONF_ACCESS_TOKEN)
name = config.get(CONF_NAME)
client = LgNetCastClient(host, access_token)
add_entities([LgTVDevice(client, name)], True)
class LgTVDevice(MediaPlayerDevice):
"""Representation of a LG TV."""
def __init__(self, client, name):
"""Initialize the LG TV device."""
self._client = client
self._name = name
self._muted = False
# Assume that the TV is in Play mode
self._playing = True
self._volume = 0
self._channel_name = ""
self._program_name = ""
self._state = None
self._sources = {}
self._source_names = []
def send_command(self, command):
"""Send remote control commands to the TV."""
try:
with self._client as client:
client.send_command(command)
except (LgNetCastError, RequestException):
self._state = STATE_OFF
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update(self):
"""Retrieve the latest data from the LG TV."""
try:
with self._client as client:
self._state = STATE_PLAYING
volume_info = client.query_data("volume_info")
if volume_info:
volume_info = volume_info[0]
self._volume = float(volume_info.find("level").text)
self._muted = volume_info.find("mute").text == "true"
channel_info = client.query_data("cur_channel")
if channel_info:
channel_info = channel_info[0]
self._channel_name = channel_info.find("chname").text
self._program_name = channel_info.find("progName").text
channel_list = client.query_data("channel_list")
if channel_list:
channel_names = []
for channel in channel_list:
channel_name = channel.find("chname")
if channel_name is not None:
channel_names.append(str(channel_name.text))
self._sources = dict(zip(channel_names, channel_list))
# sort source names by the major channel number
source_tuples = [
(k, self._sources[k].find("major").text) for k in self._sources
]
sorted_sources = sorted(
source_tuples, key=lambda channel: int(channel[1])
)
self._source_names = [n for n, k in sorted_sources]
except (LgNetCastError, RequestException):
self._state = STATE_OFF
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / 100.0
@property
def source(self):
"""Return the current input source."""
return self._channel_name
@property
def source_list(self):
"""List of available input sources."""
return self._source_names
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_CHANNEL
@property
def media_channel(self):
"""Channel currently playing."""
return self._channel_name
@property
def media_title(self):
"""Title of current playing media."""
return self._program_name
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_LGTV
@property
def media_image_url(self):
"""URL for obtaining a screen capture."""
return self._client.url + "data?target=screen_image"
def turn_off(self):
"""Turn off media player."""
self.send_command(1)
def volume_up(self):
"""Volume up the media player."""
self.send_command(24)
def volume_down(self):
"""Volume down media player."""
self.send_command(25)
def mute_volume(self, mute):
"""Send mute command."""
self.send_command(26)
def select_source(self, source):
"""Select input source."""
self._client.change_channel(self._sources[source])
def media_play_pause(self):
"""Simulate play pause media player."""
if self._playing:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._playing = True
self._state = STATE_PLAYING
self.send_command(33)
def media_pause(self):
"""Send media pause command to media player."""
self._playing = False
self._state = STATE_PAUSED
self.send_command(34)
def media_next_track(self):
"""Send next track command."""
self.send_command(36)
def media_previous_track(self):
"""Send the previous track command."""
self.send_command(37)
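# Illustrative sketch (not part of the platform above): how update() builds the
# source list -- channel names are mapped to their XML nodes and then sorted by
# the "major" channel number. The XML snippets below are hypothetical.
def _example_sorted_source_names():
    from xml.etree import ElementTree

    sample_channels = [
        ElementTree.fromstring(
            "<data><chname>News 24</chname><major>24</major></data>"),
        ElementTree.fromstring(
            "<data><chname>Movies 7</chname><major>7</major></data>"),
    ]
    sources = {ch.find("chname").text: ch for ch in sample_channels}
    source_tuples = [(name, sources[name].find("major").text) for name in sources]
    sorted_sources = sorted(source_tuples, key=lambda channel: int(channel[1]))
    return [name for name, _ in sorted_sources]  # ['Movies 7', 'News 24']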
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Filtering postprocessors for SequentialTimeSeriesModels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib import distributions
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest
@six.add_metaclass(abc.ABCMeta)
class FilteringStepPostprocessor(object):
"""Base class for processors that are applied after each filter step."""
@abc.abstractmethod
def process_filtering_step(self, current_times, current_values,
predicted_state, filtered_state, outputs):
"""Extends/modifies a filtering step, altering state and loss.
Args:
current_times: A [batch size] integer Tensor of times.
current_values: A [batch size x num features] Tensor of values filtering
is being performed on.
predicted_state: A (possibly nested) list of Tensors indicating model
state which does not take `current_times` and `current_values` into
account.
filtered_state: Same structure as predicted_state, but updated to take
`current_times` and `current_values` into account.
outputs: A dictionary of outputs produced by model filtering
(SequentialTimeSeriesModel._process_filtering_step).
Returns: A tuple of (new_state, updated_outputs);
new_state: Updated state with the same structure as `filtered_state` and
`predicted_state`.
updated_outputs: The `outputs` dictionary, updated with any new outputs
from this filtering postprocessor.
"""
pass
@abc.abstractproperty
def output_names(self):
return []
def cauchy_alternative_to_gaussian(current_times, current_values, outputs):
"""A Cauchy anomaly distribution, centered at a Gaussian prediction.
Performs an entropy-matching approximation of the scale parameters of
independent Cauchy distributions given the covariance matrix of a multivariate
Gaussian in outputs["covariance"], and centers the Cauchy distributions at
outputs["mean"]. This requires that the model that we are creating an
alternative/anomaly distribution for produces a mean and covariance.
Args:
current_times: A [batch size] Tensor of times, unused.
current_values: A [batch size x num features] Tensor of values to evaluate
the anomaly distribution at.
outputs: A dictionary of Tensors with keys "mean" and "covariance"
describing the Gaussian to construct an anomaly distribution from. The
value corresponding to "mean" has shape [batch size x num features], and
the value corresponding to "covariance" has shape [batch size x num
features x num features].
Returns:
A [batch size] Tensor of log likelihoods; the anomaly log PDF evaluated at
`current_values`.
"""
del current_times # unused
cauchy_scale = math_utils.entropy_matched_cauchy_scale(outputs["covariance"])
individual_log_pdfs = distributions.StudentT(
df=array_ops.ones([], dtype=current_values.dtype),
loc=outputs["mean"],
scale=cauchy_scale).log_prob(current_values)
return math_ops.reduce_sum(individual_log_pdfs, axis=1)
def _interpolate_state_linear(first_state, second_state, first_responsibility):
"""Interpolate between two model states linearly."""
interpolated_state_flat = []
for first_state_tensor, second_state_tensor in zip(
nest.flatten(first_state), nest.flatten(second_state)):
assert first_state_tensor.dtype == second_state_tensor.dtype
if first_state_tensor.dtype.is_floating:
# Pad the responsibility shape with ones up to the state's rank so that it
# broadcasts
first_responsibility_padded = array_ops.reshape(
tensor=first_responsibility,
shape=array_ops.concat([
array_ops.shape(first_responsibility), array_ops.ones(
[array_ops.rank(first_state_tensor) - 1], dtype=dtypes.int32)
], 0))
interpolated_state = (
first_responsibility_padded * first_state_tensor
+ (1. - first_responsibility_padded) * second_state_tensor)
interpolated_state.set_shape(first_state_tensor.get_shape())
interpolated_state_flat.append(interpolated_state)
else:
# Integer dtypes probably represent times and don't need interpolation;
# just assert that the two states agree.
with ops.control_dependencies(
[check_ops.assert_equal(first_state_tensor, second_state_tensor)]):
interpolated_state_flat.append(array_ops.identity(first_state_tensor))
return nest.pack_sequence_as(first_state, interpolated_state_flat)
class StateInterpolatingAnomalyDetector(FilteringStepPostprocessor):
"""An anomaly detector which guards model state against outliers.
Smoothly interpolates between a model's predicted and inferred states, based
on the posterior probability of an anomaly, p(anomaly | data). This is useful
if anomalies would otherwise lead to model state which is hard to recover
from (Gaussian state space models suffer from this, for example).
Relies on (1) an alternative distribution, typically with heavier tails than
the model's normal predictions, and (2) a prior probability of an anomaly. The
prior probability acts as a penalty, discouraging the system from marking too
many points as anomalies. The alternative distribution indicates the
probability of a datapoint given that it is an anomaly, and is a heavy-tailed
distribution (Cauchy) centered around the model's predictions by default.
Specifically, we have:
p(anomaly | data) = p(data | anomaly) * anomaly_prior_probability
/ (p(data | not anomaly) * (1 - anomaly_prior_probability)
+ p(data | anomaly) * anomaly_prior_probability)
This is simply Bayes' theorem, where p(data | anomaly) is the
alternative/anomaly distribution, p(data | not anomaly) is the model's
predicted distribution, and anomaly_prior_probability is the prior probability
of an anomaly occurring (user-specified, defaulting to 1%).
Rather than computing p(anomaly | data) directly, we use the odds ratio:
odds_ratio = p(data | anomaly) * anomaly_prior_probability
/ (p(data | not anomaly) * (1 - anomaly_prior_probability))
This has the same information as p(anomaly | data):
odds_ratio = p(anomaly | data) / p(not anomaly | data)
A "responsibility" score is computed for the model based on the log odds
ratio, and state interpolated based on this responsibility:
model_responsibility = 1 / (1 + exp(responsibility_scaling
                                    * ln(odds_ratio)))
model_state = filtered_model_state * model_responsibility
    + predicted_model_state * (1 - model_responsibility)
loss = -(model_responsibility
            * ln(p(data | not anomaly) * (1 - anomaly_prior_probability))
         + (1 - model_responsibility)
            * ln(p(data | anomaly) * anomaly_prior_probability))
A high odds ratio (a likely anomaly) therefore drives the responsibility
toward zero, so the state falls back on the prediction and the loss is
dominated by the anomaly distribution's log likelihood.
"""
output_names = ["anomaly_score"]
def __init__(self,
anomaly_log_likelihood=cauchy_alternative_to_gaussian,
anomaly_prior_probability=0.01,
responsibility_scaling=1.0):
"""Configure the anomaly detector.
Args:
anomaly_log_likelihood: A function taking `current_times`,
`current_values`, and `outputs` (same as the corresponding arguments
to process_filtering_step) and returning a [batch size] Tensor of log
likelihoods under an anomaly distribution.
anomaly_prior_probability: A scalar value, between 0 and 1, indicating the
prior probability of a particular example being an anomaly.
responsibility_scaling: A positive scalar controlling how fast
interpolation transitions between not-anomaly and anomaly; lower
values (closer to 0) create a smoother/slower transition.
"""
self._anomaly_log_likelihood = anomaly_log_likelihood
self._responsibility_scaling = responsibility_scaling
self._anomaly_prior_probability = anomaly_prior_probability
def process_filtering_step(self, current_times, current_values,
predicted_state, filtered_state, outputs):
"""Fall back on `predicted_state` for anomalies.
Args:
current_times: A [batch size] integer Tensor of times.
current_values: A [batch size x num features] Tensor of values filtering
is being performed on.
predicted_state: A (possibly nested) list of Tensors indicating model
state which does not take `current_times` and `current_values` into
account.
filtered_state: Same structure as predicted_state, but updated to take
`current_times` and `current_values` into account.
outputs: A dictionary of outputs produced by model filtering. Must
include `log_likelihood`, a [batch size] Tensor indicating the log
likelihood of the observations under the model's predictions.
Returns:
A tuple of (new_state, updated_outputs);
new_state: Updated state with the same structure as `filtered_state` and
`predicted_state`; predicted_state for anomalies and filtered_state
otherwise (per batch element).
updated_outputs: The `outputs` dictionary, updated with a new "loss"
(the interpolated negative log likelihoods under the model and
anomaly distributions) and "anomaly_score" (the log odds ratio of
each part of the batch being an anomaly).
"""
anomaly_log_likelihood = self._anomaly_log_likelihood(
current_times=current_times,
current_values=current_values,
outputs=outputs)
anomaly_prior_probability = ops.convert_to_tensor(
self._anomaly_prior_probability, dtype=current_values.dtype)
# p(data | anomaly) * p(anomaly)
data_and_anomaly_log_probability = (
anomaly_log_likelihood + math_ops.log(anomaly_prior_probability))
# p(data | no anomaly) * p(no anomaly)
data_and_no_anomaly_log_probability = (
outputs["log_likelihood"] + math_ops.log(1. - anomaly_prior_probability)
)
# A log odds ratio is slightly nicer here than computing p(anomaly | data),
# since it is centered around zero
anomaly_log_odds_ratio = (
data_and_anomaly_log_probability
- data_and_no_anomaly_log_probability)
model_responsibility = math_ops.sigmoid(-self._responsibility_scaling *
anomaly_log_odds_ratio)
# Do a linear interpolation between predicted and inferred model state
# based on the model's "responsibility". If we knew for sure whether
# this was an anomaly or not (binary responsibility), this would be the
# correct thing to do, but given that we don't it's just a
# (differentiable) heuristic.
interpolated_state = _interpolate_state_linear(
first_state=filtered_state,
second_state=predicted_state,
first_responsibility=model_responsibility)
# TODO(allenl): Try different responsibility scalings and interpolation
# methods (e.g. average in probability space rather than log space).
interpolated_log_likelihood = (
model_responsibility * data_and_no_anomaly_log_probability
+ (1. - model_responsibility) * data_and_anomaly_log_probability)
outputs["loss"] = -interpolated_log_likelihood
outputs["anomaly_score"] = anomaly_log_odds_ratio
return (interpolated_state, outputs)
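# Illustrative numeric sketch (plain Python, added for exposition only): how an
# anomaly log odds ratio becomes a model responsibility. The two log likelihood
# values below are made up for the example.
def _example_responsibility(anomaly_log_likelihood=-1.0,
                            model_log_likelihood=-5.0,
                            anomaly_prior_probability=0.01,
                            responsibility_scaling=1.0):
  import math
  log_odds = ((anomaly_log_likelihood + math.log(anomaly_prior_probability)) -
              (model_log_likelihood + math.log(1. - anomaly_prior_probability)))
  # Mirrors math_ops.sigmoid(-responsibility_scaling * anomaly_log_odds_ratio):
  # a large positive log odds ratio (likely anomaly) pushes the responsibility
  # toward 0, so the interpolated state falls back on predicted_state.
  responsibility = 1. / (1. + math.exp(responsibility_scaling * log_odds))
  return log_odds, responsibility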
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""Base classes for calliope commands and groups.
"""
import abc
from googlecloudsdk.calliope import usage_text
from googlecloudsdk.core import log
from googlecloudsdk.core.util import resource_printer
class LayoutException(Exception):
"""An exception for when a command or group .py file has the wrong types."""
class ReleaseTrackNotImplementedException(Exception):
"""An exception for when a command or group does not support a release track.
"""
class ReleaseTrack(object):
"""An enum representing the release track of a command or command group.
The release track controls where a command appears. The default of GA means
it will show up under gcloud. If you enable a command or group for the alpha
or beta tracks, those commands will be duplicated under those groups as well.
"""
class _TRACK(object):
"""An enum representing the release track of a command or command group."""
# pylint: disable=redefined-builtin
def __init__(self, id, prefix, help_tag, help_note):
self.id = id
self.prefix = prefix
self.help_tag = help_tag
self.help_note = help_note
def __str__(self):
return self.id
def __eq__(self, other):
return self.id == other.id
GA = _TRACK('GA', None, None, None)
BETA = _TRACK(
'BETA', 'beta',
'{0}(BETA){0} '.format(usage_text.MARKDOWN_BOLD),
'This command is currently in BETA and may change without notice.')
ALPHA = _TRACK(
'ALPHA', 'alpha',
'{0}(ALPHA){0} '.format(usage_text.MARKDOWN_BOLD),
'This command is currently in ALPHA and may change without notice.')
_ALL = [GA, BETA, ALPHA]
@staticmethod
def AllValues():
"""Gets all possible enum values.
Returns:
list, All the enum values.
"""
return list(ReleaseTrack._ALL)
class _Common(object):
"""Base class for Command and Group.
Attributes:
config: {str:object}, A set of key-value pairs that will persist (as long
as they are JSON-serializable) between command invocations. Can be used
for caching.
http_func: function that returns an http object that can be used during
service requests.
"""
__metaclass__ = abc.ABCMeta
_cli_generator = None
_is_hidden = False
_release_track = None
# TODO(user): Remove this once commands are only allowed to show up under
# the correct track (b/19406151)
_legacy_release_track = None
_valid_release_tracks = None
def __init__(self, http_func):
self._http_func = http_func
self.exit_code = 0
@staticmethod
def FromModule(module, release_track, is_command):
"""Get the type implementing CommandBase from the module.
Args:
module: module, The module resulting from importing the file containing a
command.
release_track: ReleaseTrack, The release track that we should load from
this module.
is_command: bool, True if we are loading a command, False to load a group.
Returns:
type, The custom class that implements CommandBase.
Raises:
LayoutException: If there is not exactly one type inheriting
CommandBase.
ReleaseTrackNotImplementedException: If there is no command or group
implementation for the requested release track.
"""
return _Common._FromModule(
module.__file__, module.__dict__.values(), release_track, is_command)
@staticmethod
def _FromModule(mod_file, module_attributes, release_track, is_command):
"""Implementation of FromModule() made easier to test."""
commands = []
groups = []
# Collect all the registered groups and commands.
for command_or_group in module_attributes:
if issubclass(type(command_or_group), type):
if issubclass(command_or_group, Command):
commands.append(command_or_group)
elif issubclass(command_or_group, Group):
groups.append(command_or_group)
if is_command:
if groups:
# Ensure that there are no groups if we are expecting a command.
raise LayoutException(
'You cannot define groups [{0}] in a command file: [{1}]'
.format(', '.join([g.__name__ for g in groups]), mod_file))
if not commands:
# Make sure we found a command.
raise LayoutException('No commands defined in file: [{0}]'.format(
mod_file))
commands_or_groups = commands
else:
# Ensure that there are no commands if we are expecting a group.
if commands:
raise LayoutException(
'You cannot define commands [{0}] in a command group file: [{1}]'
.format(', '.join([c.__name__ for c in commands]), mod_file))
if not groups:
# Make sure we found a group.
raise LayoutException('No command groups defined in file: [{0}]'.format(
mod_file))
commands_or_groups = groups
# We found a single thing, if it's valid for this track, return it.
if len(commands_or_groups) == 1:
command_or_group = commands_or_groups[0]
valid_tracks = command_or_group.ValidReleaseTracks()
# If there is a single thing defined, and it does not declare any valid
# tracks, just assume it is enabled for all tracks that its parent is.
if not valid_tracks or release_track in valid_tracks:
return command_or_group
raise ReleaseTrackNotImplementedException(
'No implementation for release track [{0}] in file: [{1}]'
.format(release_track.id, mod_file))
# There was more than one thing found, make sure there are no conflicts.
implemented_release_tracks = set()
for command_or_group in commands_or_groups:
valid_tracks = command_or_group.ValidReleaseTracks()
# When there are multiple definitions, they need to explicitly register
# their track to keep things sane.
if not valid_tracks:
raise LayoutException(
'Multiple {0}s defined in file: [{1}]. Each must explicitly '
'declare valid release tracks.'
.format('command' if is_command else 'group', mod_file))
# Make sure no two classes define the same track.
duplicates = implemented_release_tracks & valid_tracks
if duplicates:
raise LayoutException(
'Multiple definitions for release tracks [{0}] in file: [{1}]'
.format(', '.join([str(d) for d in duplicates]), mod_file))
implemented_release_tracks |= valid_tracks
valid_commands_or_groups = [i for i in commands_or_groups
if release_track in i.ValidReleaseTracks()]
# We know there is at most 1 because of the above check.
if len(valid_commands_or_groups) != 1:
raise ReleaseTrackNotImplementedException(
'No implementation for release track [{0}] in file: [{1}]'
.format(release_track.id, mod_file))
return valid_commands_or_groups[0]
@staticmethod
def Args(parser):
"""Set up arguments for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.
"""
pass
@classmethod
def IsHidden(cls):
return cls._is_hidden
@classmethod
def ReleaseTrack(cls, for_help=False):
# TODO(user): Remove for_help once commands are only allowed to show up
# under the correct track (b/19406151).
if for_help and cls._legacy_release_track:
return cls._legacy_release_track
return cls._release_track
@classmethod
def ValidReleaseTracks(cls):
return cls._valid_release_tracks
@classmethod
def GetExecutionFunction(cls, *args):
"""Get a fully bound function that will call another gcloud command.
This class method can be called at any time to generate a function that will
execute another gcloud command. The function itself can only be executed
after the gcloud CLI has been built, i.e. after all Args methods have
been called.
Args:
*args: str, The args for the command to execute. Each token should be a
separate string and the tokens should start from after the 'gcloud'
part of the invocation.
Returns:
A bound function to call the gcloud command.
"""
def ExecFunc():
return cls._cli_generator.Generate().Execute(list(args),
call_arg_complete=False)
return ExecFunc
@classmethod
def GetCLIGenerator(cls):
"""Get a generator function that can be used to execute a gcloud command.
Returns:
A bound generator function to execute a gcloud command.
"""
return cls._cli_generator.Generate
def Http(self, auth=True, creds=None):
"""Get the http object to be used during service requests.
Args:
auth: bool, True if the http object returned should be authorized.
creds: oauth2client.client.Credentials, If auth is True and creds is not
None, use those credentials to authorize the httplib2.Http object.
Returns:
httplib2.Http, http object to be used during service requests.
"""
return self._http_func(auth=auth, creds=creds)
class Command(_Common):
"""Command is a base class for commands to implement.
Attributes:
cli: calliope.cli.CLI, The CLI object representing this command line tool.
context: {str:object}, A set of key-value pairs that can be used for
common initialization among commands.
group: base.Group, The instance of the group class above this command. You
can use this to access common methods within a group.
format: func(obj), A function that prints objects to stdout using the
user-chosen formatting option.
http_func: function that returns an http object that can be used during
service requests.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, cli, context, group, http_func, format_string):
super(Command, self).__init__(http_func)
self.cli = cli
self.context = context
self.group = group
self.__format_string = format_string
def ExecuteCommand(self, args):
self.cli.Execute(args, call_arg_complete=False)
@abc.abstractmethod
def Run(self, args):
"""Run the command.
Args:
args: argparse.Namespace, An object that contains the values for the
arguments specified in the .Args() method.
Returns:
A python object that is given back to the python caller, or sent to the
.Display() method in CLI mode.
"""
raise NotImplementedError('CommandBase.Run is not overridden')
def Display(self, args, result):
"""Print the result for a human to read from the terminal.
Args:
args: argparse.Namespace, The same namespace given to the corresponding
.Run() invocation.
result: object, The object returned by the corresponding .Run() invocation.
"""
pass
# TODO(user): When the formatting revamp goes in, this should be renamed.
# pylint: disable=invalid-name
def format(self, obj):
"""Prints out the given object using the format decided by the format flag.
Args:
obj: Object, The object to print.
"""
if obj:
resource_printer.Print(obj, self.__format_string, out=log.out)
class Group(_Common):
"""Group is a base class for groups to implement.
Attributes:
http_func: function that returns an http object that can be used during
service requests.
"""
def __init__(self, http_func):
super(Group, self).__init__(http_func)
def Filter(self, context, args):
"""Modify the context that will be given to this group's commands when run.
Args:
context: {str:object}, A set of key-value pairs that can be used for
common initialization among commands.
args: argparse.Namespace: The same namespace given to the corresponding
.Run() invocation.
"""
pass
class Argument(object):
"""A class that allows you to save an argument configuration for reuse."""
def __init__(self, *args, **kwargs):
"""Creates the argument.
Args:
*args: The positional args to parser.add_argument.
**kwargs: The keyword args to parser.add_argument.
"""
try:
self.__detailed_help = kwargs.pop('detailed_help')
except KeyError:
self.__detailed_help = None
self.__args = args
self.__kwargs = kwargs
def AddToParser(self, parser):
"""Adds this argument to the given parser.
Args:
parser: The argparse parser.
Returns:
The result of parser.add_argument().
"""
arg = parser.add_argument(*self.__args, **self.__kwargs)
if self.__detailed_help:
arg.detailed_help = self.__detailed_help
return arg
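# Illustrative sketch (hypothetical flag, not part of calliope itself): Argument
# lets a flag be declared once and shared by several commands, each calling
# AddToParser() from its Args() method.
_EXAMPLE_FORMAT_FLAG = Argument(
    '--format', default='json', help='Output format.',
    detailed_help='Format used by Command.format() when printing the result.')
# Inside some command's Args():  _EXAMPLE_FORMAT_FLAG.AddToParser(parser)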
def Hidden(cmd_class):
"""Decorator for hiding calliope commands and groups.
Decorate a subclass of base.Command or base.Group with this function, and the
decorated command or group will not show up in help text.
Args:
cmd_class: base._Common, A calliope command or group.
Returns:
A modified version of the provided class.
"""
# pylint: disable=protected-access
cmd_class._is_hidden = True
return cmd_class
# TODO(user): Remove this once commands are only allowed to show up under
# the correct track (b/19406151).
def Alpha(cmd_class):
"""Decorator for annotating a command or group as ALPHA.
Args:
cmd_class: base._Common, A calliope command or group.
Returns:
A modified version of the provided class.
"""
# pylint: disable=protected-access
cmd_class._legacy_release_track = ReleaseTrack.ALPHA
return cmd_class
# TODO(user): Remove this once commands are only allowed to show up under
# the correct track (b/19406151)
def Beta(cmd_class):
"""Decorator for annotating a command or group as BETA.
Args:
cmd_class: base._Common, A calliope command or group.
Returns:
A modified version of the provided class.
"""
# pylint: disable=protected-access
cmd_class._legacy_release_track = ReleaseTrack.BETA
return cmd_class
def ReleaseTracks(*tracks):
"""Mark this class as the command implementation for the given release tracks.
Args:
*tracks: [ReleaseTrack], A list of release tracks that this is valid for.
Returns:
The decorated function.
"""
def ApplyReleaseTracks(cmd_class):
"""Wrapper function for the decorator."""
# pylint: disable=protected-access
cmd_class._valid_release_tracks = set(tracks)
return cmd_class
return ApplyReleaseTracks
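# Illustrative sketch (hypothetical command, not part of calliope itself): how
# the decorators above combine with the Command base class. @Hidden keeps the
# command out of help text; @ReleaseTracks controls which tracks
# _Common._FromModule() will load it for.
@Hidden
@ReleaseTracks(ReleaseTrack.ALPHA, ReleaseTrack.BETA)
class _ExampleEchoCommand(Command):
  """Echoes a message back to the caller."""

  @staticmethod
  def Args(parser):
    parser.add_argument('message', help='The message to echo.')

  def Run(self, args):
    return args.message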
|
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates face detection, label detection,
explicit content, and shot change detection using the Google Cloud API.
Usage Examples:
python analyze.py faces gs://demomaker/google_gmail.mp4
python analyze.py labels gs://cloud-ml-sandbox/video/chicago.mp4
python analyze.py labels_file resources/cat.mp4
python analyze.py shots gs://demomaker/gbikes_dinosaur.mp4
python analyze.py explicit_content gs://demomaker/gbikes_dinosaur.mp4
"""
import argparse
import base64
import io
import sys
import time
from google.cloud import videointelligence_v1beta2
from google.cloud.videointelligence_v1beta2 import enums
from google.cloud.videointelligence_v1beta2 import types
def analyze_explicit_content(path):
""" Detects explicit content from the GCS path to a video. """
video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
features = [enums.Feature.EXPLICIT_CONTENT_DETECTION]
operation = video_client.annotate_video(path, features)
print('\nProcessing video for explicit content annotations:')
while not operation.done():
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(15)
print('\nFinished processing.')
# first result is retrieved because a single video was processed
explicit_annotation = (operation.result().annotation_results[0].
explicit_annotation)
likely_string = ("Unknown", "Very unlikely", "Unlikely", "Possible",
"Likely", "Very likely")
for frame in explicit_annotation.frames:
frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
print('Time: {}s'.format(frame_time))
print('\tpornography: {}'.format(
likely_string[frame.pornography_likelihood]))
def analyze_faces(path):
""" Detects faces given a GCS path. """
video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
features = [enums.Feature.FACE_DETECTION]
config = types.FaceDetectionConfig(include_bounding_boxes=True)
context = types.VideoContext(face_detection_config=config)
operation = video_client.annotate_video(
path, features, video_context=context)
print('\nProcessing video for face annotations:')
while not operation.done():
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(15)
print('\nFinished processing.')
# first result is retrieved because a single video was processed
face_annotations = (operation.result().annotation_results[0].
face_annotations)
for face_id, face in enumerate(face_annotations):
print('Face {}'.format(face_id))
print('Thumbnail size: {}'.format(len(face.thumbnail)))
for segment_id, segment in enumerate(face.segments):
start_time = (segment.segment.start_time_offset.seconds +
segment.segment.start_time_offset.nanos / 1e9)
end_time = (segment.segment.end_time_offset.seconds +
segment.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
print('\tSegment {}: {}'.format(segment_id, positions))
# There are typically many frames for each face,
# here we print information on only the first frame.
frame = face.frames[0]
time_offset = (frame.time_offset.seconds +
frame.time_offset.nanos / 1e9)
box = frame.normalized_bounding_boxes[0]
print('First frame time offset: {}s'.format(time_offset))
print('First frame normalized bounding box:')
print('\tleft: {}'.format(box.left))
print('\ttop: {}'.format(box.top))
print('\tright: {}'.format(box.right))
print('\tbottom: {}'.format(box.bottom))
print('\n')
def analyze_labels(path):
""" Detects labels given a GCS path. """
video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
features = [enums.Feature.LABEL_DETECTION]
config = types.LabelDetectionConfig(
label_detection_mode=enums.LabelDetectionMode.SHOT_AND_FRAME_MODE)
context = types.VideoContext(label_detection_config=config)
operation = video_client.annotate_video(
path, features, video_context=context)
print('\nProcessing video for label annotations:')
while not operation.done():
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(15)
print('\nFinished processing.')
# first result is retrieved because a single video was processed
results = operation.result().annotation_results[0]
# Process video/segment level label annotations
for i, segment_label in enumerate(results.segment_label_annotations):
print('Video label description: {}'.format(
segment_label.entity.description))
for category_entity in segment_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))
for i, segment in enumerate(segment_label.segments):
start_time = (segment.segment.start_time_offset.seconds +
segment.segment.start_time_offset.nanos / 1e9)
end_time = (segment.segment.end_time_offset.seconds +
segment.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
confidence = segment.confidence
print('\tSegment {}: {}'.format(i, positions))
print('\tConfidence: {}'.format(confidence))
print('\n')
# Process shot level label annotations
for i, shot_label in enumerate(results.shot_label_annotations):
print('Shot label description: {}'.format(
shot_label.entity.description))
for category_entity in shot_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))
for i, shot in enumerate(shot_label.segments):
start_time = (shot.segment.start_time_offset.seconds +
shot.segment.start_time_offset.nanos / 1e9)
end_time = (shot.segment.end_time_offset.seconds +
shot.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
confidence = shot.confidence
print('\tSegment {}: {}'.format(i, positions))
print('\tConfidence: {}'.format(confidence))
print('\n')
# Process frame level label annotations
for i, frame_label in enumerate(results.frame_label_annotations):
print('Frame label description: {}'.format(
frame_label.entity.description))
for category_entity in frame_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))
# Each frame_label_annotation has many frames,
# here we print information only about the first frame.
frame = frame_label.frames[0]
time_offset = (frame.time_offset.seconds +
frame.time_offset.nanos / 1e9)
print('\tFirst frame time offset: {}s'.format(time_offset))
print('\tFirst frame confidence: {}'.format(frame.confidence))
print('\n')
def analyze_labels_file(path):
""" Detects labels given a file path. """
video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
features = [enums.Feature.LABEL_DETECTION]
with io.open(path, "rb") as movie:
content_base64 = base64.b64encode(movie.read())
operation = video_client.annotate_video(
'', features, input_content=content_base64)
print('\nProcessing video for label annotations:')
while not operation.done():
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(15)
print('\nFinished processing.')
# first result is retrieved because a single video was processed
results = operation.result().annotation_results[0]
# Process video/segment level label annotations
for i, segment_label in enumerate(results.segment_label_annotations):
print('Video label description: {}'.format(
segment_label.entity.description))
for category_entity in segment_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))
for i, segment in enumerate(segment_label.segments):
start_time = (segment.segment.start_time_offset.seconds +
segment.segment.start_time_offset.nanos / 1e9)
end_time = (segment.segment.end_time_offset.seconds +
segment.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
confidence = segment.confidence
print('\tSegment {}: {}'.format(i, positions))
print('\tConfidence: {}'.format(confidence))
print('\n')
# Process shot level label annotations
for i, shot_label in enumerate(results.shot_label_annotations):
print('Shot label description: {}'.format(
shot_label.entity.description))
for category_entity in shot_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))
for i, shot in enumerate(shot_label.segments):
start_time = (shot.segment.start_time_offset.seconds +
shot.segment.start_time_offset.nanos / 1e9)
end_time = (shot.segment.end_time_offset.seconds +
shot.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
confidence = shot.confidence
print('\tSegment {}: {}'.format(i, positions))
print('\tConfidence: {}'.format(confidence))
print('\n')
# Process frame level label annotations
for i, frame_label in enumerate(results.frame_label_annotations):
print('Frame label description: {}'.format(
frame_label.entity.description))
for category_entity in frame_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))
# Each frame_label_annotation has many frames,
# here we print information only about the first frame.
frame = frame_label.frames[0]
time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
print('\tFirst frame time offset: {}s'.format(time_offset))
print('\tFirst frame confidence: {}'.format(frame.confidence))
print('\n')
def analyze_shots(path):
""" Detects camera shot changes. """
video_client = videointelligence_v1beta2.VideoIntelligenceServiceClient()
features = [enums.Feature.SHOT_CHANGE_DETECTION]
operation = video_client.annotate_video(path, features)
print('\nProcessing video for shot change annotations:')
while not operation.done():
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(15)
print('\nFinished processing.')
# first result is retrieved because a single video was processed
shots = operation.result().annotation_results[0].shot_annotations
for i, shot in enumerate(shots):
start_time = (shot.start_time_offset.seconds +
shot.start_time_offset.nanos / 1e9)
end_time = (shot.end_time_offset.seconds +
shot.end_time_offset.nanos / 1e9)
print('\tShot {}: {} to {}'.format(i, start_time, end_time))
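def _wait_for_operation(operation, poll_interval=15):
    """ Illustrative helper (an addition, not used above): the polling loop
    repeated in each analyze_* function, factored out. Blocks until the
    long-running operation completes, printing a progress dot per poll, then
    returns its result. """
    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(poll_interval)
    return operation.result()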
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
subparsers = parser.add_subparsers(dest='command')
analyze_faces_parser = subparsers.add_parser(
'faces', help=analyze_faces.__doc__)
analyze_faces_parser.add_argument('path')
analyze_labels_parser = subparsers.add_parser(
'labels', help=analyze_labels.__doc__)
analyze_labels_parser.add_argument('path')
analyze_labels_file_parser = subparsers.add_parser(
'labels_file', help=analyze_labels_file.__doc__)
analyze_labels_file_parser.add_argument('path')
analyze_explicit_content_parser = subparsers.add_parser(
'explicit_content', help=analyze_explicit_content.__doc__)
analyze_explicit_content_parser.add_argument('path')
analyze_shots_parser = subparsers.add_parser(
'shots', help=analyze_shots.__doc__)
analyze_shots_parser.add_argument('path')
args = parser.parse_args()
if args.command == 'faces':
    analyze_faces(args.path)
elif args.command == 'labels':
    analyze_labels(args.path)
elif args.command == 'labels_file':
    analyze_labels_file(args.path)
elif args.command == 'shots':
    analyze_shots(args.path)
elif args.command == 'explicit_content':
    analyze_explicit_content(args.path)
|
|
import math
import os
import shutil
import tempfile
import unittest
import mleap.pyspark # noqa
from mleap.pyspark.spark_support import SimpleSparkSerializer # noqa
import pandas as pd
from pandas.testing import assert_frame_equal
from pyspark.ml import Pipeline
from pyspark.sql.types import FloatType
from pyspark.sql.types import StructType
from pyspark.sql.types import StructField
from mleap.pyspark.feature.math_binary import MathBinary
from mleap.pyspark.feature.math_binary import BinaryOperation
from tests.pyspark.lib.spark_session import spark_session
INPUT_SCHEMA = StructType([
StructField('f1', FloatType()),
StructField('f2', FloatType()),
])
class MathBinaryTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.spark = spark_session()
@classmethod
def tearDownClass(cls):
cls.spark.stop()
def setUp(self):
self.input = self.spark.createDataFrame([
(
float(i),
float(i * 2),
)
for i in range(1, 10)
], INPUT_SCHEMA)
self.expected_add = pd.DataFrame(
[(
float(i + i * 2)
)
for i in range(1, 10)],
columns=['add(f1, f2)'],
)
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def _new_add_math_binary(self):
return MathBinary(
operation=BinaryOperation.Add,
inputA="f1",
inputB="f2",
outputCol="add(f1, f2)",
)
def test_add_math_binary(self):
add_transformer = self._new_add_math_binary()
result = add_transformer.transform(self.input).toPandas()[['add(f1, f2)']]
assert_frame_equal(self.expected_add, result)
def test_math_binary_pipeline(self):
add_transformer = self._new_add_math_binary()
mul_transformer = MathBinary(
operation=BinaryOperation.Multiply,
inputA="f1",
inputB="add(f1, f2)",
outputCol="mul(f1, add(f1, f2))",
)
expected = pd.DataFrame(
[(
float(i * (i + i * 2))
)
for i in range(1, 10)],
columns=['mul(f1, add(f1, f2))'],
)
pipeline = Pipeline(
stages=[add_transformer, mul_transformer]
)
pipeline_model = pipeline.fit(self.input)
result = pipeline_model.transform(self.input).toPandas()[['mul(f1, add(f1, f2))']]
assert_frame_equal(expected, result)
def test_can_instantiate_all_math_binary(self):
for binary_operation in BinaryOperation:
transformer = MathBinary(
operation=binary_operation,
inputA="f1",
inputB="f2",
outputCol="operation",
)
def test_serialize_deserialize_math_binary(self):
add_transformer = self._new_add_math_binary()
file_path = '{}{}'.format('jar:file:', os.path.join(self.tmp_dir, 'math_binary.zip'))
add_transformer.serializeToBundle(file_path, self.input)
deserialized_math_binary = SimpleSparkSerializer().deserializeFromBundle(file_path)
result = deserialized_math_binary.transform(self.input).toPandas()[['add(f1, f2)']]
assert_frame_equal(self.expected_add, result)
def test_serialize_deserialize_pipeline(self):
add_transformer = self._new_add_math_binary()
mul_transformer = MathBinary(
operation=BinaryOperation.Multiply,
inputA="f1",
inputB="add(f1, f2)",
outputCol="mul(f1, add(f1, f2))",
)
expected = pd.DataFrame(
[(
float(i * (i + i * 2))
)
for i in range(1, 10)],
columns=['mul(f1, add(f1, f2))'],
)
pipeline = Pipeline(
stages=[add_transformer, mul_transformer]
)
pipeline_model = pipeline.fit(self.input)
file_path = '{}{}'.format('jar:file:', os.path.join(self.tmp_dir, 'math_binary_pipeline.zip'))
pipeline_model.serializeToBundle(file_path, self.input)
deserialized_pipeline = SimpleSparkSerializer().deserializeFromBundle(file_path)
result = deserialized_pipeline.transform(self.input).toPandas()[['mul(f1, add(f1, f2))']]
assert_frame_equal(expected, result)
def test_add_math_binary_defaults_none(self):
add_transformer = self._new_add_math_binary()
none_df = self.spark.createDataFrame([
(None, float(i * 2))
for i in range(1, 3)
], INPUT_SCHEMA)
# Summing None + int yields Nones
expected_df = pd.DataFrame([
(None,)
for i in range(1, 3)
], columns=['add(f1, f2)'])
result = add_transformer.transform(none_df).toPandas()[['add(f1, f2)']]
assert_frame_equal(expected_df, result)
def test_mult_math_binary_default_inputA(self):
mult_transformer = MathBinary(
operation=BinaryOperation.Multiply,
inputB="f2",
outputCol="mult(1, f2)",
defaultA=1.0,
)
none_df = self.spark.createDataFrame([
(None, float(i * 1234))
for i in range(1, 3)
], INPUT_SCHEMA)
expected_df = pd.DataFrame([
(float(i * 1234), )
for i in range(1, 3)
], columns=['mult(1, f2)'])
result = mult_transformer.transform(none_df).toPandas()[['mult(1, f2)']]
assert_frame_equal(expected_df, result)
def test_mult_math_binary_default_inputB(self):
mult_transformer = MathBinary(
operation=BinaryOperation.Multiply,
inputA="f1",
outputCol="mult(f1, 2)",
defaultB=2.0,
)
none_df = self.spark.createDataFrame([
(float(i * 1234), None)
for i in range(1, 3)
], INPUT_SCHEMA)
expected_df = pd.DataFrame([
(float(i * 1234 * 2), )
for i in range(1, 3)
], columns=['mult(f1, 2)'])
result = mult_transformer.transform(none_df).toPandas()[['mult(f1, 2)']]
assert_frame_equal(expected_df, result)
def test_mult_math_binary_default_both(self):
mult_transformer = MathBinary(
operation=BinaryOperation.Multiply,
outputCol="mult(7, 8)",
defaultA=7.0,
defaultB=8.0,
)
none_df = self.spark.createDataFrame([
(None, None)
for i in range(1, 3)
], INPUT_SCHEMA)
expected_df = pd.DataFrame([
(float(7 * 8), )
for i in range(1, 3)
], columns=['mult(7, 8)'])
result = mult_transformer.transform(none_df).toPandas()[['mult(7, 8)']]
assert_frame_equal(expected_df, result)
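# Conventional entry point (an addition) so the module can be run directly; the
# original suite may rely on an external test runner instead.
if __name__ == '__main__':
    unittest.main()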
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mask-RCNN anchor definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
EPSILON = 1e-8
BBOX_XFORM_CLIP = np.log(1000. / 16.)
def top_k(scores, k, tensors):
"""A wrapper that returns top-k scores and corresponding boxes.
This functions selects the top-k scores and boxes as follows.
indices = argsort(scores)[:k]
scores = scores[indices]
outputs = []
for tensor in tensors:
outputs.append(tensor[indices, :])
return scores, outputs
Args:
scores: a tensor with a shape of [batch_size, N]. N is the number of scores.
k: an integer for selecting the top-k elements.
tensors: a list containing at least one element. Each element has a shape
of [batch_size, N, 4] or [batch_size, N, 1].
Returns:
scores: the selected top-k scores with a shape of [batch_size, k].
outputs: the list containing the corresponding tensors, gathered with the
same top-k indices, in the order of the input `tensors`.
Raises:
AssertionError: if `tensors` is not a list or is empty.
"""
if not isinstance(tensors, list):
raise AssertionError('tensors is not a list')
if not tensors:
raise AssertionError('tensors is empty')
with tf.name_scope('top_k_wrapper'):
scores, top_k_indices = tf.nn.top_k(scores, k=k)
batch_size, _ = scores.get_shape().as_list()
outputs = []
for tensor in tensors:
_, _, minor_dim = tensor.get_shape().as_list()
index_offsets = tf.range(batch_size) * tf.shape(tensor)[1]
indices = tf.reshape(top_k_indices +
tf.expand_dims(index_offsets, 1), [-1])
tensor = tf.reshape(
tf.gather(tf.reshape(tensor, [-1, minor_dim]), indices),
[batch_size, -1, minor_dim])
outputs.append(tensor)
return scores, outputs
def bbox_overlap(boxes, gt_boxes):
"""Calculates the overlap between proposal and ground truth boxes.
Some `gt_boxes` may have been padded. The returned `iou` tensor for these
boxes will be -1.
Args:
boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of
proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The
last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form.
gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This
tensor might have paddings with a negative value.
Returns:
iou: a tensor with a shape of [batch_size, N, MAX_NUM_INSTANCES].
"""
with tf.name_scope('bbox_overlap'):
bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(
value=gt_boxes, num_or_size_splits=4, axis=2)
# Calculates the intersection area.
i_xmin = tf.maximum(bb_x_min, tf.transpose(gt_x_min, [0, 2, 1]))
i_xmax = tf.minimum(bb_x_max, tf.transpose(gt_x_max, [0, 2, 1]))
i_ymin = tf.maximum(bb_y_min, tf.transpose(gt_y_min, [0, 2, 1]))
i_ymax = tf.minimum(bb_y_max, tf.transpose(gt_y_max, [0, 2, 1]))
i_area = tf.maximum((i_xmax - i_xmin), 0) * tf.maximum((i_ymax - i_ymin), 0)
# Calculates the union area.
bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min)
gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min)
# Adds a small epsilon to avoid divide-by-zero.
u_area = bb_area + tf.transpose(gt_area, [0, 2, 1]) - i_area + EPSILON
# Calculates IoU.
iou = i_area / u_area
# Fills -1 for padded ground truth boxes.
padding_mask = tf.less(i_xmin, tf.zeros_like(i_xmin))
iou = tf.where(padding_mask, -tf.ones_like(iou), iou)
return iou
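# Worked example (NumPy only, added for exposition; not used by the graph code):
def _example_bbox_overlap_numpy():
  """IoU of box [0, 0, 10, 10] vs. ground truth [0, 5, 10, 15].

  The intersection is a 10 x 5 strip (area 50) and the union is
  100 + 100 - 50 = 150, so the IoU is 1/3, matching bbox_overlap's formula.
  """
  bb = np.array([0., 0., 10., 10.])   # [ymin, xmin, ymax, xmax]
  gt = np.array([0., 5., 10., 15.])
  i_ymin, i_xmin = np.maximum(bb[:2], gt[:2])
  i_ymax, i_xmax = np.minimum(bb[2:], gt[2:])
  i_area = max(i_ymax - i_ymin, 0.) * max(i_xmax - i_xmin, 0.)
  u_area = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
            (gt[2] - gt[0]) * (gt[3] - gt[1]) - i_area + EPSILON)
  return i_area / u_area  # ~0.333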
def filter_boxes(scores, boxes, rpn_min_size, image_info):
"""Filters boxes whose height or width is smaller than rpn_min_size.
Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/ops/generate_proposals.py # pylint: disable=line-too-long
Args:
scores: a tensor with a shape of [batch_size, N].
boxes: a tensor with a shape of [batch_size, N, 4]. The proposals
are in pixel coordinates.
rpn_min_size: an integer that represents the smallest length of the image
height or width.
image_info: a tensor of shape [batch_size, 5] where the five columns
encode the input image's [height, width, scale,
original_height, original_width]. `scale` is the scale
factor used to scale the network input size to the original image size.
See dataloader.DetectionInputProcessor for details.
Returns:
scores: a tensor with a shape of [batch_size, anchors]. Same shape and dtype
as input scores.
proposals: a tensor with a shape of [batch_size, anchors, 4]. Same shape and
dtype as input boxes.
"""
with tf.name_scope('filter_boxes'):
y_min, x_min, y_max, x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
image_info = tf.cast(tf.expand_dims(image_info, axis=2), dtype=boxes.dtype)
# The following tensors have a shape of [batch_size, 1, 1].
image_height = image_info[:, 0:1, :]
image_width = image_info[:, 1:2, :]
image_scale = image_info[:, 2:3, :]
min_size = tf.cast(tf.maximum(rpn_min_size, 1), dtype=boxes.dtype)
# Proposal center is computed relative to the scaled input image.
hs = y_max - y_min + 1
ws = x_max - x_min + 1
y_ctr = y_min + hs / 2
x_ctr = x_min + ws / 2
height_mask = tf.greater_equal(hs, min_size * image_scale)
width_mask = tf.greater_equal(ws, min_size * image_scale)
center_mask = tf.logical_and(
tf.less(y_ctr, image_height), tf.less(x_ctr, image_width))
mask = tf.logical_and(tf.logical_and(height_mask, width_mask),
center_mask)[:, :, 0]
scores = tf.where(mask, scores, tf.zeros_like(scores))
boxes = tf.cast(tf.expand_dims(mask, 2), boxes.dtype) * boxes
return scores, boxes
def clip_boxes(boxes, image_shapes):
"""Clips boxes to image boundaries.
Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L132 # pylint: disable=line-too-long
Args:
boxes: a tensor with a shape [batch_size, N, 4].
image_shapes: a tensor with a shape of [batch_size, 2]; the last dimension
represents [height, width].
Returns:
clipped_boxes: the clipped boxes. Same shape and dtype as input boxes.
Raises:
ValueError: If boxes is not a rank-3 tensor or the last dimension of
boxes is not 4.
"""
if boxes.shape.ndims != 3:
raise ValueError('boxes must be of rank 3.')
if boxes.shape[2] != 4:
raise ValueError(
    'boxes.shape[2] is {}, but must be 4.'.format(boxes.shape[2]))
with tf.name_scope('clip_boxes'):
y_min, x_min, y_max, x_max = tf.split(
value=boxes, num_or_size_splits=4, axis=2)
# Manipulates the minimum and maximum so that type and shape match.
image_shapes = tf.cast(
tf.expand_dims(image_shapes, axis=2), dtype=boxes.dtype)
# The following tensors have a shape of [batch_size, 1, 1].
win_y_min = tf.zeros_like(image_shapes[:, 0:1, :])
win_x_min = tf.zeros_like(image_shapes[:, 0:1, :])
win_y_max = image_shapes[:, 0:1, :]
win_x_max = image_shapes[:, 1:2, :]
y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max - 1), win_y_min)
y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max - 1), win_y_min)
x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max - 1), win_x_min)
x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max - 1), win_x_min)
clipped_boxes = tf.concat(
[y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],
axis=2)
return clipped_boxes
def batch_decode_box_outputs_op(boxes, delta, weights=None):
"""Transforms relative regression coordinates to absolute positions.
Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L150 # pylint: disable=line-too-long
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
boxes: corresponding anchors with a shape of [batch_size, N, 4], which is
in [y_min, x_min, y_max, x_max] form.
delta: box regression targets with a shape of [batch_size, N, 4].
weights: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. The reference implementation
uses [10.0, 10.0, 5.0, 5.0].
Returns:
outputs: bounding boxes.
"""
if weights:
assert len(weights) == 4
for scalar in weights:
assert scalar > 0
delta = tf.cast(delta, dtype=boxes.dtype)
heights = boxes[:, :, 2] - boxes[:, :, 0] + 1.0
widths = boxes[:, :, 3] - boxes[:, :, 1] + 1.0
ctr_y = boxes[:, :, 0] + 0.5 * heights
ctr_x = boxes[:, :, 1] + 0.5 * widths
dy = delta[:, :, 0]
dx = delta[:, :, 1]
dh = delta[:, :, 2]
dw = delta[:, :, 3]
if weights:
dy /= weights[0]
dx /= weights[1]
dh /= weights[2]
dw /= weights[3]
# Prevent sending too large values into tf.exp()
dw = tf.minimum(dw, BBOX_XFORM_CLIP)
dh = tf.minimum(dh, BBOX_XFORM_CLIP)
pred_ctr_x = dx * widths + ctr_x
pred_ctr_y = dy * heights + ctr_y
pred_h = tf.exp(dh) * heights
pred_w = tf.exp(dw) * widths
# ymin
ymin = pred_ctr_y - 0.5 * pred_h
# xmin
xmin = pred_ctr_x - 0.5 * pred_w
# ymax (note: "- 1" is correct; don't be fooled by the asymmetry)
ymax = pred_ctr_y + 0.5 * pred_h - 1
# xmax (note: "- 1" is correct; don't be fooled by the asymmetry)
xmax = pred_ctr_x + 0.5 * pred_w - 1
return tf.stack([ymin, xmin, ymax, xmax], axis=2)
def batch_encode_box_targets_op(boxes, gt_boxes, weights=None):
"""Transforms box target given proposal and ground-truth boxes.
Network predictions are normalized and relative to a given anchor (or a ground
truth box). Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L193 # pylint: disable=line-too-long
Args:
boxes: anchors with a shape of [batch_size, N, 4]. Both
boxes are in [y_min, x_min, y_max, x_max] form.
gt_boxes: corresponding ground truth boxes with a shape of
[batch_size, N, 4].
weights: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. The reference implementation
uses [10.0, 10.0, 5.0, 5.0].
Returns:
outputs: encoded box targets.
"""
if weights:
assert len(weights) == 4
for scalar in weights:
assert scalar > 0
ex_heights = boxes[:, :, 2] - boxes[:, :, 0] + 1.0
ex_widths = boxes[:, :, 3] - boxes[:, :, 1] + 1.0
ex_ctr_y = boxes[:, :, 0] + 0.5 * ex_heights
ex_ctr_x = boxes[:, :, 1] + 0.5 * ex_widths
gt_heights = gt_boxes[:, :, 2] - gt_boxes[:, :, 0] + 1.0
gt_widths = gt_boxes[:, :, 3] - gt_boxes[:, :, 1] + 1.0
gt_ctr_y = gt_boxes[:, :, 0] + 0.5 * gt_heights
gt_ctr_x = gt_boxes[:, :, 1] + 0.5 * gt_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dh = tf.log(gt_heights / ex_heights)
targets_dw = tf.log(gt_widths / ex_widths)
if weights:
targets_dy *= weights[0]
targets_dx *= weights[1]
targets_dh *= weights[2]
targets_dw *= weights[3]
return tf.stack([targets_dy, targets_dx, targets_dh, targets_dw], axis=2)
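# Worked example (NumPy only, added for exposition; not used by the graph code):
def _example_encode_decode_round_trip():
  """Encoding a ground truth box against an anchor and then decoding the
  targets recovers the ground truth, using the same formulas as
  batch_encode_box_targets_op / batch_decode_box_outputs_op with weights=None.
  """
  anchor = np.array([0., 0., 9., 19.])   # [ymin, xmin, ymax, xmax]
  gt = np.array([2., 3., 11., 22.])
  heights = anchor[2] - anchor[0] + 1.0
  widths = anchor[3] - anchor[1] + 1.0
  ctr_y = anchor[0] + 0.5 * heights
  ctr_x = anchor[1] + 0.5 * widths
  gt_heights = gt[2] - gt[0] + 1.0
  gt_widths = gt[3] - gt[1] + 1.0
  # Encode.
  dy = (gt[0] + 0.5 * gt_heights - ctr_y) / heights
  dx = (gt[1] + 0.5 * gt_widths - ctr_x) / widths
  dh = np.log(gt_heights / heights)
  dw = np.log(gt_widths / widths)
  # Decode.
  pred_ctr_y = dy * heights + ctr_y
  pred_ctr_x = dx * widths + ctr_x
  pred_h = np.exp(dh) * heights
  pred_w = np.exp(dw) * widths
  decoded = np.array([pred_ctr_y - 0.5 * pred_h,
                      pred_ctr_x - 0.5 * pred_w,
                      pred_ctr_y + 0.5 * pred_h - 1,
                      pred_ctr_x + 0.5 * pred_w - 1])
  assert np.allclose(decoded, gt)
  return decoded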
|
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
import re
import weakref
from threading import local, Thread
from ..compat import implements_to_string, text_type, string_types, number_types
from . import dataindex
from .dataindex import parse, join, join_parsed, is_from_root
from .expression import Expression
from .errors import ContextKeyError, SubstitutionError
from .missing import Missing
from .tools import to_expression
from ..tools import lazystr
from ..moyaexceptions import MoyaException
@implements_to_string
class DynamicContextItem(object):
"""A proxy for a dynamic item"""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
super(DynamicContextItem, self).__init__()
def __moyacontext__(self, context):
return self.callable(context, *self.args, **self.kwargs)
@property
def obj(self):
from moya import pilot
return self.__moyacontext__(pilot.context)
def __str__(self):
return text_type(self.obj)
def __repr__(self):
return repr(self.obj)
def __moyarepr__(self, context):
return to_expression(context, self.obj)
class CounterContextItem(object):
def __init__(self, start):
self.value = start
def __moyacontext__(self, context):
ret = self.value
self.value += 1
return ret
def __repr__(self):
return text_type(self.value)
def __moyarepr__(self, context):
return to_expression(context, self.value)
@implements_to_string
class LazyContextItem(object):
"""A proxy for a lazily evaluated object"""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
self.called = False
self.result = None
super(LazyContextItem, self).__init__()
def __moyacontext__(self, context):
if not self.called:
self.result = self.callable(*self.args, **self.kwargs)
self.called = True
return self.result
@property
def obj(self):
return self.__moyacontext__(None)
def __str__(self):
return text_type(self.obj)
def __repr__(self):
return repr(self.obj)
def __moyarepr__(self, context):
return to_expression(context, self.obj)
@implements_to_string
class AsyncContextItem(Thread):
"""A proxy for an asynchronously evaluated object"""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
self._obj = None
super(AsyncContextItem, self).__init__()
self.start()
def will_block(self):
"""Check if accessing this object will block"""
return self.is_alive()
def run(self):
self._obj = self.callable(*self.args, **self.kwargs)
def __moyacontext__(self, context):
self.join()
return self._obj
@property
def obj(self):
return self.__moyacontext__(None)
def __str__(self):
return text_type(self._obj)
def __repr__(self):
return repr(self._obj)
def __moyarepr__(self, context):
return to_expression(context, self._obj)
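# Minimal illustrative sketch (an addition, not part of Moya): the worker thread
# starts in __init__, and accessing .obj joins it before returning the result.
def _example_async_item():
    item = AsyncContextItem(lambda a, b: a + b, 2, 3)  # computation starts now
    return item.obj  # joins the worker thread, then returns 5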
@implements_to_string
class _ThreadLocalItem(object):
"""A proxy for a thread local object"""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
self.local = local()
def __moyacontext__(self, context):
obj = getattr(self.local, "obj", None)
if not obj:
obj = self.callable(*self.args, **self.kwargs)
setattr(self.local, "obj", obj)
return obj
@property
def obj(self):
return self.__moyacontext__(None)
def __str__(self):
return text_type(self.obj)
def __repr__(self):
return repr(self.obj)
def __moyarepr__(self, context):
return to_expression(context, self.obj)
class LinkItem(object):
"""Links on index to another, like a symbolic link"""
__slots__ = ["proxy_index"]
def __init__(self, proxy_index):
self.proxy_index = dataindex.parse(proxy_index)
def __repr__(self):
return "<link %s>" % self.proxy_index
def __moyacontext__(self, context):
return context[self.proxy_index]
class LastIndexItem(object):
"""Returns the last item of a sequence"""
def __init__(self, sequence_index, name):
self.sequence_index = dataindex.parse(sequence_index)
self.name = name
def __moyacontext__(self, context):
if self.sequence_index in context:
return context[self.sequence_index][-1]
return None
@implements_to_string
class Scope(object):
__slots__ = ["stack", "index", "obj"]
    def __init__(self, stack, index, obj=None):
        self.stack = stack
        if obj is None:
            obj = self.stack.context.get(index)
        self.index = index
        self.obj = obj
def __repr__(self):
return "<Scope %s>" % self.index
def __str__(self):
return self.index
class Frame(object):
def __init__(self, stack, index, obj=None):
self.index = index
self.stack = stack
self.scopes = [Scope(stack, index, obj)]
self._push = self.scopes.append
self._pop = self.scopes.pop
self._update()
def _update(self):
self.last_scope = self.scopes[-1]
self.first_scope = self.scopes[0]
def copy(self):
frame = Frame.__new__(Frame)
frame.index = self.index
frame.stack = self.stack
frame.scopes = self.scopes[:]
frame._push = self.scopes.append
frame._pop = self.scopes.pop
frame._update()
return frame
def push_scope(self, index):
self._push(Scope(self.stack, index))
self._update()
def pop_scope(self):
self._pop()
self._update()
def __iter__(self):
return reversed(self.scopes)
def __len__(self):
return len(self.scopes)
def __repr__(self):
return '<frame "%s">' % self.index
class Stack(object):
def __init__(self, context, root_obj):
self._context = weakref.ref(context)
self.frames = [Frame(self, ".", root_obj)]
self._push = self.frames.append
self._pop = self.frames.pop
self._current_frame = self.frames[-1]
@property
def context(self):
return self._context()
def push_frame(self, index_or_frame):
if isinstance(index_or_frame, Frame):
self._push(index_or_frame)
else:
self._push(Frame(self, parse(index_or_frame)))
self._current_frame = self.frames[-1]
def pop_frame(self):
self._pop()
self._current_frame = self.frames[-1]
def clone_frame(self):
return self._current_frame.copy()
    def reset(self):
        del self.frames[1:]
        self._current_frame = self.frames[-1]
@property
def index(self):
"""Index of most recent scope"""
return self._current_frame.last_scope.index
@property
def index_set(self):
"""Index of first scope"""
return self._current_frame.first_scope.index
@property
def obj(self):
"""Most recent scope"""
return self._current_frame.last_scope.obj
@property
def obj_set(self):
"""First scope in frame"""
return self._current_frame.first_scope.obj
@property
def scope(self):
return self._current_frame.last_scope
@property
def frame(self):
return self._current_frame
@implements_to_string
class _FrameContext(object):
def __init__(self, context, *index):
self.context = context
self.index = join_parsed(*index)
def __enter__(self):
self.context.push_frame(self.index)
def __exit__(self, exc_type, exc_val, exc_tb):
self.context.pop_frame()
def __str__(self):
return self.index
class _ScopeContext(object):
def __init__(self, context, *index):
self.context = context
self.index = join_parsed(*index)
def __enter__(self):
self.context.push_scope(self.index)
def __exit__(self, exc_type, exc_val, exc_tb):
self.context.pop_scope()
class _TempScopeContext(_ScopeContext):
def __exit__(self, exc_type, exc_val, exc_tb):
self.context.pop_scope()
self.context.safe_delete(self.index)
class _DataScopeContext(object):
def __init__(self, context, data):
self.context = context
self.data = data
def __enter__(self):
scope_index = self.context.push_thread_local_stack("datascope", self.data)
self.context.push_scope(scope_index)
def __exit__(self, exc_type, exc_val, exc_tb):
self.context.pop_scope()
self.context.pop_stack("datascope")
class _DataFrameContext(object):
def __init__(self, context, data):
self.context = context
self.data = data
def __enter__(self):
scope_index = self.context.push_thread_local_stack("dataframe", self.data)
self.context.push_frame(scope_index)
def __exit__(self, exc_type, exc_val, exc_tb):
self.context.pop_frame()
self.context.pop_stack("dataframe")
class _StackContext(object):
"""This is a context manager for client stacks on the context"""
def __init__(self, context, stack_name, value, stack_callable=list):
self.context = context
self.stack_name = stack_name
self.value = value
self.stack_callable = stack_callable
def __enter__(self):
index = self.context.push_stack(
self.stack_name, self.value, stack_callable=self.stack_callable
)
self.context.push_frame(index)
return index
def __exit__(self, exc_type, exc_value, exc_tb):
self.context.pop_frame()
self.context.pop_stack(self.stack_name)
class _RootStackContext(object):
def __init__(self, context, stack_name, value, stack_callable=list):
self.context = context
self.stack_name = stack_name
self.value = value
self.stack_callable = stack_callable
def __enter__(self):
stack = self.context.set_new_call("._{}_stack".format(self.stack_name), list)
stack.append(self.value)
self.context["." + self.stack_name] = self.value
def __exit__(self, exc_type, exc_value, exc_tb):
stack = self.context["._{}_stack".format(self.stack_name)]
stack.pop()
try:
self.context["." + self.stack_name] = stack[-1]
except IndexError:
del self.context["." + self.stack_name]
class DummyLock(object):
"""Replacement for real lock that does nothing"""
def __enter__(self):
pass
def __exit__(self, *args, **kwargs):
pass
def _get_key(obj, key):
    getitem = getattr(obj, "__getitem__", None)
if getitem is not None:
try:
return getitem(key)
except (TypeError, KeyError, IndexError):
return Ellipsis
else:
return getattr(obj, key, Ellipsis)
def synchronize(f):
f._synchronize = True
return f
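# NOTE: `synchronize` only tags a method; the locking itself is applied in
# Context._make_thread_safe() below, which rebinds every tagged method to a
# wrapper that holds the context lock. The helper below is an illustrative
# sketch of that wrapping pattern in isolation; it is not used elsewhere in
# this module.
def _example_wrap_synchronized(obj, lock):
    """Rebind every method tagged with @synchronize so it acquires `lock`."""
    for method_name in dir(obj):
        method = getattr(obj, method_name)
        if getattr(method, "_synchronize", False):
            def _make_sync(method=method):
                def _sync(*args, **kwargs):
                    with lock:
                        return method(*args, **kwargs)
                return _sync
            setattr(obj, method_name, _make_sync())
    return obj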
class _DummyLocal(object):
def __init__(self, stack):
self.stack = stack
@implements_to_string
class Context(object):
"""A meta data structure for indexing nested Python objects"""
_re_substitute_context = re.compile(r"\$\{(.*?)\}")
_sub = _re_substitute_context.sub
def __init__(self, root=None, thread_safe=False, re_sub=None, name=None):
if root is None:
self.root = {}
else:
self.root = root
self.lock = None
if re_sub is not None:
self._sub = re.compile(re_sub).sub
self._stack = Stack(self, self.root)
self.thread_safe = False
if thread_safe:
self._make_thread_safe()
self.name = name
    def _make_thread_safe(self):
        if self.thread_safe:
            return
        # Create the lock used by the synchronized wrappers below; without it
        # the `with context.lock` blocks would fail on a None lock.
        import threading
        self.lock = threading.RLock()
for method_name in dir(self):
method = getattr(self, method_name)
if getattr(method, "_synchronize", False):
def make_sync(context, method):
def _sync(*args, **kwargs):
with context.lock:
return method(*args, **kwargs)
return _sync
setattr(self, method_name, make_sync(self, method))
self.thread_safe = True
@classmethod
def escape(cls, v):
return v.replace(".", "\\.")
def __repr__(self):
if self.name:
return "<context '{}'>".format(self.name)
else:
return "<context>"
def __str__(self):
if self.name:
return "<context '{}'>".format(self.name)
else:
return "<context>"
def to_expr(self, obj, max_size=200):
"""Convert an object to a context expression, if possible"""
return lazystr(to_expression, self, obj, max_size=max_size)
@property
def obj(self):
return self._stack.obj
def capture_scope(self):
"""Get an object that contains the data in the scope"""
obj = {}
for scope in reversed(self.current_frame.scopes):
scope_obj = scope.obj
if hasattr(scope_obj, "__getitem__") and hasattr(scope_obj, "items"):
for k, v in scope_obj.items():
if k not in obj:
obj[k] = v
else:
return scope_obj
return obj
@synchronize
def clone(self):
"""Creates a context with a shallow copy of the data"""
return Context(self.root.copy(), thread_safe=self.thread_safe)
@synchronize
def reset(self):
"""Reset stack"""
self._stack = Stack(self, self.root)
def substitute(self, s, process=text_type):
get_eval = Expression.get_eval
def sub(match):
try:
return process(get_eval(match.group(1), self))
except MoyaException:
raise
except Exception as e:
start, end = match.span(1)
raise SubstitutionError(match.group(1), start, end, original=e)
return self._sub(sub, s)
sub = substitute
@classmethod
def extract_expressions(cls, s):
"""Extract all expressions in substitution syntax"""
expressions = set(
match.group(1) for match in cls._re_substitute_context.finditer(s)
)
return expressions
def push_frame(self, index):
"""Push an index frame, so all relative indices will reference data under this index."""
stack = self._stack
if isinstance(index, Frame):
stack.push_frame(index)
else:
stack.push_frame(join_parsed(stack.index_set, index))
def pop_frame(self):
"""Pop an index frame from the stack"""
self._stack.pop_frame()
def frame(self, *index):
"""Context manager to push/pop index frames"""
return _FrameContext(self, *index)
@property
def current_frame(self):
return self._stack._current_frame
def _get_obj(self, index):
index = parse(index)
if is_from_root(index):
return self.root, index
return self._stack.obj, index
def _get_scope(self, index):
index = parse(index)
if is_from_root(index):
return [self.root], index
return [scope.obj for scope in self._stack.frame], index
def push_scope(self, index):
stack = self._stack
stack.frame.push_scope(join_parsed(stack.index, index))
def pop_scope(self):
self._stack.frame.pop_scope()
def scope(self, *index):
"""Returns a context manager for a scope"""
return _ScopeContext(self, *index)
def temp_scope(self, index):
"""Returns a context manager for a temporary scope (deletes scope index on exit)"""
return _TempScopeContext(self, index)
def get_frame(self):
"""Get the current frame"""
return text_type(self._stack.index)
def data_scope(self, data=dict):
"""Make a context manager to create a scope from arbitrary mapping"""
if callable(data):
data = data()
return _DataScopeContext(self, data)
def data_frame(self, data):
"""Make a context manager to create a frame from arbitrary mapping"""
return _DataFrameContext(self, data)
def _set_lookup(self, index, _parse=parse):
indices = _parse(index)
if indices.from_root:
obj = self.root
else:
obj = self._stack.obj
        try:
            final = indices.tokens[-1]
        except IndexError:
            raise ContextKeyError(self, index, message="Can't set root!")
try:
for name in indices.tokens[:-1]:
obj = (
getattr(obj, "__getitem__", None)
or getattr(obj, "__getattribute__")
)(name)
__moyacontext__ = getattr(obj, "__moyacontext__", None)
if __moyacontext__:
obj = __moyacontext__(self)
except (KeyError, IndexError, AttributeError):
raise ContextKeyError(self, index)
return obj, final
def update(self, map):
"""Update current scope with key/values from a mapping object"""
for k, v in map.items():
self[k] = v
def update_base(self, map):
"""Update the base (first) scope"""
obj = self.current_frame.scopes[0].obj
for k, v in map.items():
obj[k] = v
@synchronize
def set(self, index, value):
"""Set a value"""
obj, final = self._set_lookup(index)
try:
(getattr(obj, "__setitem__", None) or getattr(obj, "__setattr__"))(
final, value
)
except Exception:
raise ContextKeyError(self, index)
@synchronize
def set_simple(self, index, value):
"""Set a single index"""
obj = self._stack.obj
try:
(getattr(obj, "__setitem__", None) or getattr(obj, "__setattr__"))(
index, value
)
except Exception:
raise ContextKeyError(self, index)
@synchronize
def set_multiple(self, seq):
"""Set many index / value pairs"""
_lookup = self._set_lookup
for index, value in seq:
obj, final = _lookup(index)
try:
(getattr(obj, "__setitem__", None) or getattr(obj, "__setattr__"))(
final, value
)
except Exception:
raise ContextKeyError(self, index)
@synchronize
def set_new(self, index, value):
"""Set a value if the index does not exist"""
if index not in self:
self[index] = value
return value
else:
return self[index]
@synchronize
def set_new_call(self, index, value_callable):
"""Set a value from a callable if it does not exist"""
if index not in self:
self[index] = value = value_callable()
return value
return self[index]
def set_dynamic(self, index, callable, *args, **kwargs):
"""Set a dynamic item (updates when references)"""
self.set(index, DynamicContextItem(callable, *args, **kwargs))
def set_counter(self, index, start=1):
"""Set a dynamic value that increments each time it is evaluated"""
self.set(index, CounterContextItem(start))
def set_lazy(self, index, callable, *args, **kwargs):
"""Associate a callable with an index. The callable is evaluated and the result
returned when the index is first referenced. Subsequent references use the
previously calculated result.
"""
self.set(index, LazyContextItem(callable, *args, **kwargs))
    def set_async(self, index, callable, *args, **kwargs):
        """Associate a callable with an index; the callable runs concurrently, and
        access will block if the index is referenced before the callable has completed.
        """
self.set(index, AsyncContextItem(callable, *args, **kwargs))
def set_thread_local(self, index, callable, *args, **kwargs):
"""Associate callable with an index that will be used to create
thread local data.
"""
tlocal_item = _ThreadLocalItem(callable, *args, **kwargs)
self.set(index, tlocal_item)
return tlocal_item.obj
@synchronize
def set_new_thread_local(self, index, callable, *args, **kwargs):
"""Sets a new thread local callable, if the index doesn't yet exist"""
if index not in self:
return self.set_thread_local(index, callable, *args, **kwargs)
else:
return self[index]
def link(self, index, proxy_index):
self.set(index, LinkItem(proxy_index))
@synchronize
def __contains__(self, index, _parse=parse):
indices = _parse(index)
if indices.from_root:
objs = [self.root]
else:
objs = [scope.obj for scope in self._stack._current_frame]
if not indices:
return objs[0]
first, rest = indices.top_tail
try:
for obj in objs:
try:
obj = (
getattr(obj, "__getitem__", None)
or getattr(obj, "__getattribute__")
)(first)
except (TypeError, KeyError, IndexError, AttributeError):
continue
if not rest:
return True
if hasattr(obj, "__moyacontext__"):
obj = obj.__moyacontext__(self)
last = rest.pop()
for name in rest:
obj = (
getattr(obj, "__getitem__", None)
or getattr(obj, "__getattribute__")
)(name)
if hasattr(obj, "__moyacontext__"):
obj = obj.__moyacontext__(self)
if hasattr(obj, "__getitem__"):
return last in obj
else:
return hasattr(obj, last)
except (TypeError, KeyError, IndexError, AttributeError):
return False
return False
@synchronize
def get(self, index, default=Ellipsis, _parse=parse):
indices = _parse(index)
if indices.from_root:
objs = [self.root]
else:
objs = [scope.obj for scope in self._stack._current_frame]
if not indices:
obj = objs[0]
if hasattr(obj, "__moyacontext__"):
obj = obj.__moyacontext__(self)
return obj
first, rest = indices.top_tail
try:
for obj in objs:
try:
obj = (
getattr(obj, "__getitem__", None)
or getattr(obj, "__getattribute__")
)(first)
except (TypeError, KeyError, IndexError, AttributeError):
continue
if hasattr(obj, "__moyacontext__"):
obj = obj.__moyacontext__(self)
for name in rest:
obj = (
getattr(obj, "__getitem__", None)
or getattr(obj, "__getattribute__")
)(name)
if hasattr(obj, "__moyacontext__"):
obj = obj.__moyacontext__(self)
return obj
except (TypeError, KeyError, IndexError, AttributeError):
return Missing(index)
if default is not Ellipsis:
return default
return Missing(index)
@synchronize
def pop(self, index, default=Ellipsis):
value = self.get(index, default=default)
self.safe_delete(index)
return value
def get_simple(self, index):
"""Get a single index key"""
objs = [scope.obj for scope in self._stack._current_frame]
for obj in objs:
try:
val = (
getattr(obj, "__getitem__", None)
or getattr(obj, "__getattribute__")
)(index)
except (TypeError, KeyError, IndexError, AttributeError):
continue
if hasattr(val, "__moyacontext__"):
return val.__moyacontext__(self)
return val
return Missing(index)
def get_first(self, default=None, *indices):
"""Return the first index present, or return a default"""
get = self.get
for index in indices:
value = get(index, Ellipsis)
if value is not Ellipsis:
return value
return default
def inc(self, index):
"""Increment an integer value and return it"""
try:
value = self.get(index, 0) + 1
except ValueError:
value = 0
self.set(index, value)
return value
def dec(self, index):
"""Decrement an integer value and return it"""
try:
value = self.get(index, 0) - 1
except ValueError:
value = 0
self.set(index, value)
return value
def get_first_true(self, default=None, *indices):
"""Return the first index that evaluates to true, or a default"""
get = self.get
for index in indices:
value = get(index, None)
if value:
return value
return default
def get_sub(self, index, default=Ellipsis):
return self.get(self.sub(index), default)
@synchronize
def copy(self, src, dst):
self.set(dst, self.get(src))
@synchronize
def move(self, src, dst):
self.set(dst, self.get(src))
self.delete(src)
@synchronize
def delete(self, index):
obj, final = self._set_lookup(index)
if hasattr(obj, "__getitem__"):
del obj[final]
else:
delattr(obj, final)
@synchronize
def safe_delete(self, *indices):
"""Deletes a value if it exists, or does nothing"""
for index in indices:
obj, final = self._set_lookup(index)
if hasattr(obj, "__getitem__"):
if final in obj:
del obj[final]
else:
if hasattr(obj, final):
delattr(obj, final)
def eval(self, expression, _isinstance=isinstance, _string_types=string_types):
"""Evaluate an expression, can be either a string or an expression compiled with `compile`"""
if _isinstance(expression, _string_types):
return Expression(expression).eval(self)
return expression.eval(self)
def subeval(self, s):
expression = self.sub(s)
if isinstance(expression, string_types):
return Expression(expression).eval(self)
return expression.eval(self)
@synchronize
def keys(self, index=""):
obj = self.get(index)
if hasattr(obj, "__getitem__"):
if hasattr(obj, "keys"):
return list(obj.keys())
else:
return [i for i, _v in enumerate(obj)]
else:
return [k for k in dir(obj) if not k.startswith("_")]
@synchronize
def values(self, index=""):
obj, indices = self._get_obj(index)
keys = self.keys(indices)
return [self.get(join(indices, [k]), None) for k in keys]
@synchronize
def items(self, index=""):
obj = self.get(index)
if hasattr(obj, "__getitem__"):
if hasattr(obj, "items"):
return list(obj.items())
else:
return list(enumerate(obj))
else:
return [(k, getattr(obj, k)) for k in dir(obj) if not k.startswith("_")]
@synchronize
def all_keys(self, max_depth=5):
keys = []
def recurse(index, depth=0):
indices = parse(index)
obj = self.get(indices)
keys.append(dataindex.build(index))
if max_depth is not None and depth >= max_depth:
return
if not isinstance(obj, (bool, slice) + number_types + string_types):
for k, v in self.items(indices):
recurse(join(indices, [k]), depth + 1)
recurse("")
return keys
def stack(self, stack_name, value, stack_callable=list):
return _StackContext(self, stack_name, value, stack_callable=stack_callable)
def root_stack(self, stack_name, value, stack_callable=list):
return _RootStackContext(self, stack_name, value, stack_callable=stack_callable)
@synchronize
def push_stack(self, stack_name, value, stack_callable=list):
"""Create a stack in the root of the context"""
stack_index = "_{}_stack".format(stack_name)
if stack_index not in self.root:
stack = self.root[stack_index] = stack_callable()
else:
stack = self.root[stack_index]
stack.append(value)
self.set(stack_name, LastIndexItem(stack_index, "." + stack_name))
value_index = ".{}.{}".format(stack_index, len(stack) - 1)
return value_index
@synchronize
def pop_stack(self, stack_name):
"""Pop a value from an existing stack"""
stack_index = "._{}_stack".format(stack_name)
stack = self[stack_index]
value = stack.pop()
if not stack:
del self[stack_index]
return value
def get_stack_top(self, stack_name, default=None):
stack = self.get("._{}_stack".format(stack_name), None)
if not stack:
return default
return stack[-1]
@synchronize
def push_thread_local_stack(self, stack_name, value, stack_callable=list):
"""Push a value on to a thread local stack"""
stack_index = "._{}_stack".format(stack_name)
stack = self.set_new_thread_local(stack_index, stack_callable)
stack.append(value)
value_index = "{}.{}".format(stack_index, len(stack) - 1)
return value_index
# Pop thread local stack is the same as the non-local version
pop_thread_local_stack = pop_stack
__setitem__ = set
__getitem__ = get
__delitem__ = delete
if __name__ == "__main__":
c = Context()
c["foo"] = dict(bar={}, baz={})
c["foo.bar.fruits"] = ["apples", "oranges", "pears"]
c["foo.baz.td"] = dict(posts=[1, 2, 3, 4])
c["whooo"] = "wah"
with c.scope("foo"):
with c.scope("bar"):
print(c["fruits"])
print(c["td"])
print(c[".whooo"])
# c = Context()
# c['foo'] = {}
# c.push_frame('foo')
# self.assertEqual(c.get_frame(), 'foo')
# c['bar'] = 1
# self.assertEqual(c.root['foo']['bar'], 1)
# c.pop_frame()
# self.assertEqual(c.get_frame(), '')
# c['baz'] = 2
# self.assertEqual(c.root['baz'], 2)
# c = Context()
# c['foo'] = {}
# c['fruit'] = "apple"
# c['foo.bar'] = {}
# c.push_scope('foo.bar')
# c['ilike'] = c['fruit']
# c.push_scope('.foo')
# c['f'] = 4
# print c.root
|
|
# -*- coding: utf-8 -*-
from girder.exceptions import ValidationException
from girder.models.folder import Folder
from girder.models.setting import Setting
from girder.models.user import User
from tests import base
from girder_item_licenses.settings import PluginSettings
def setUpModule():
base.enabledPlugins.append('item_licenses')
base.startServer()
def tearDownModule():
base.stopServer()
class ItemLicensesTestCase(base.TestCase):
def setUp(self):
super().setUp()
# Create a user
user = {
'email': '[email protected]',
'login': 'user1login',
'firstName': 'First',
'lastName': 'Last',
'password': 'user1password',
'admin': False
}
self.user = User().createUser(**user)
# Get user's private folder
folders = Folder().childFolders(self.user, 'user', user=self.user)
for folder in folders:
if folder['name'] == 'Private':
self.folder = folder
break
def testItemCreateInvalid(self):
"""
Test creating items with invalid licenses.
"""
        # Create item with a null license
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': None
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertValidationError(resp, 'license')
# Create item with an invalid license name
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': 'Unsupported license'
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertValidationError(resp, 'license')
# Create item with a valid license name with extra whitespace
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': ' The MIT License (MIT) '
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertValidationError(resp, 'license')
def testItemCreateAndUpdate(self):
"""
        Test creating, reading, and updating an item, especially with regard
        to its license field.
"""
# Create item without specifying a license
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Create item with a blank license name
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': ''
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Update item license
params = {
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Update item license to be unspecified
params = {
'license': ''
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], '')
# Create item with a valid license name
params = {
'name': ' my item name',
'description': ' a description ',
'folderId': self.folder['_id'],
'license': 'The MIT License (MIT)'
}
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'The MIT License (MIT)')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'The MIT License (MIT)')
# Update item
params = {
'name': 'changed name',
'description': 'new description',
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Update item with the same license name
params = {
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s' % resp.json['_id'], method='PUT',
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
def testItemCopy(self):
"""
        Test copying an item, especially with regard to its license field.
"""
params = {
'name': 'original item',
'description': 'original description',
'license': 'The MIT License (MIT)',
'folderId': self.folder['_id']
}
# Create item
resp = self.request(path='/item', method='POST', params=params,
user=self.user)
self.assertStatusOk(resp)
origItemId = resp.json['_id']
# Copy to a new item with different name and license.
params = {
'name': 'new item',
'license': 'Apache License 2'
}
resp = self.request(path='/item/%s/copy' % origItemId,
method='POST', user=self.user, params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
# Fetch item
resp = self.request(path='/item/%s' % resp.json['_id'],
params=params, user=self.user)
self.assertStatusOk(resp)
self.assertEqual(resp.json['license'], 'Apache License 2')
def testGetLicenses(self):
"""
Test getting list of licenses.
"""
# Get default settings
resp = self.request(path='/item/licenses', user=self.user, params={
'default': True
})
self.assertStatusOk(resp)
self.assertGreater(len(resp.json), 1)
self.assertIn('category', resp.json[0])
self.assertIn('licenses', resp.json[0])
self.assertGreater(len(resp.json[0]['licenses']), 8)
self.assertIn('name', resp.json[0]['licenses'][0])
self.assertGreater(len(resp.json[0]['licenses'][0]['name']), 0)
self.assertIn('name', resp.json[0]['licenses'][1])
self.assertGreater(len(resp.json[0]['licenses'][1]['name']), 0)
# Get current settings
resp = self.request(path='/item/licenses', user=self.user)
self.assertStatusOk(resp)
self.assertGreater(len(resp.json), 1)
self.assertIn('category', resp.json[0])
self.assertIn('licenses', resp.json[0])
self.assertGreater(len(resp.json[0]['licenses']), 8)
self.assertIn('name', resp.json[0]['licenses'][0])
self.assertGreater(len(resp.json[0]['licenses'][0]['name']), 0)
self.assertIn('name', resp.json[0]['licenses'][1])
self.assertGreater(len(resp.json[0]['licenses'][1]['name']), 0)
# Change licenses
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': '1'}]},
{'category': 'B', 'licenses': [{'name': '2'}, {'name': '3'}]}])
# Get default settings after changing licenses
resp = self.request(path='/item/licenses', user=self.user, params={
'default': True
})
self.assertStatusOk(resp)
self.assertGreater(len(resp.json), 1)
self.assertIn('category', resp.json[0])
self.assertIn('licenses', resp.json[0])
self.assertGreater(len(resp.json[0]['licenses']), 8)
self.assertIn('name', resp.json[0]['licenses'][0])
self.assertGreater(len(resp.json[0]['licenses'][0]['name']), 0)
self.assertIn('name', resp.json[0]['licenses'][1])
self.assertGreater(len(resp.json[0]['licenses'][1]['name']), 0)
# Get current settings after changing licenses
resp = self.request(path='/item/licenses', user=self.user)
self.assertStatusOk(resp)
self.assertCountEqual(
resp.json,
[{'category': 'A', 'licenses': [{'name': '1'}]},
{'category': 'B', 'licenses': [{'name': '2'}, {'name': '3'}]}])
def testLicensesSettingValidation(self):
"""
Test validation of licenses setting.
"""
# Test valid settings
Setting().set(
PluginSettings.LICENSES,
[])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': []}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': '1'}]}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': '1'}, {'name': '2'}]}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': []},
{'category': 'B', 'licenses': [{'name': '1'}]}])
Setting().set(
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': []},
{'category': 'B', 'licenses': [{'name': '1'}, {'name': '2'}]}])
# Test invalid top-level types
for val in (None, 1, '', {}, [{}]):
self.assertRaises(ValidationException, Setting().set, PluginSettings.LICENSES, val)
# Test invalid category types
for category, licenses in ((None, []), (1, []), ('', []), ({}, [])):
self.assertRaises(
ValidationException,
Setting().set,
PluginSettings.LICENSES,
[{'category': category, 'licenses': licenses}])
# Test invalid licenses types
for val in (None, {}, [1], ['']):
self.assertRaises(
ValidationException,
Setting().set,
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': val}])
# Test invalid license names
for val in (None, 1, '', {}, []):
self.assertRaises(
ValidationException,
Setting().set,
PluginSettings.LICENSES,
[{'category': 'A', 'licenses': [{'name': val}]}])
|
|
"""
Topographica cortical map simulator package.
Topographica is designed as a collection of packages from which
elements can be selected to model specific systems. For more
information, see the individual subpackages::
base - Core Topographica functions and classes
plotting - Visualization functions and classes
analysis - Analysis functions and classes (besides plotting)
tkgui - Tk-based graphical user interface (GUI)
command - High-level user commands
misc - Various useful independent modules
The Topographica primitives library consists of a family of classes
that can be used with the above functions and classes::
sheet - Sheet classes: 2D arrays of processing units
projection - Projection classes: connections between Sheets
pattern - PatternGenerator classes: 2D input or weight patterns
ep - EventProcessor classes: other simulation objects
transferfn - Transfer functions, for e.g. normalization or squashing
responsefn - Calculate the response of a Projection
learningfn - Adjust weights for a Projection
coordmapper - CoordinateMapperFn classes: map coords between Sheets
Each of the library directories can be extended with new classes of
the appropriate type, just by adding a new .py file to that directory.
E.g. new PatternGenerator classes can be added to pattern/, and will
then show up in the GUI menus as potential input patterns.
$Id$
"""
__version__ = "$Revision$"
# The tests and the GUI are omitted from this list, and have to be
# imported explicitly if desired.
__all__ = ['analysis',
'base',
'command',
'coordmapper',
'ep',
'learningfn',
'misc',
'numbergen',
'transferfn',
'pattern',
'plotting',
'projection',
'responsefn',
'sheet']
# get set by the topographica script
release = ''
version = ''
import param
import os
import platform
# Default location in which to create files
if platform.system()=='Darwin' or platform.mac_ver()[0]:
_default_output_path = os.path.join(os.path.expanduser("~"),'Documents')
else:
_default_output_path = os.path.join(os.path.expanduser("~"),'topographica')
if not os.path.exists(_default_output_path):
print "Creating %s"%_default_output_path
os.mkdir(_default_output_path)
# Location of topo/ package. This kind of thing won't work with py2exe
# etc. Need to see if we can get rid of it.
_package_path = os.path.split(__file__)[0]
param.normalize_path.prefix = _default_output_path
param.resolve_path.search_paths+=([_default_output_path] + [_package_path])
# CEBALERT (about PIL):
# PIL can be installed so that it's e.g. "from PIL import Image" or
# just "import Image". The code below means Image etc are always
# imported, but then the rest of topographica can consistently use
# Image (rather than a try/except, such as the one below). An
# alternative would be an import hook, which would only run on
# attempting to import Image etc.
try:
import Image
except ImportError:
from PIL import Image, ImageOps, ImageDraw, ImageFont
import sys
sys.modules['Image']=Image
sys.modules['ImageOps']=ImageOps
sys.modules['ImageDraw']=ImageDraw
sys.modules['ImageFont']=ImageFont
# ImageTk is completely optional
try:
import ImageTk
except ImportError:
try:
from PIL import ImageTk
import sys
sys.modules['ImageTk']=ImageTk
except ImportError:
pass
# CEBALERT: can we move these pickle support functions elsewhere? In
# fact, can we just gather all the non-legacy pickle garbage into one
# place? Pickle clutter adds complexity, and having all the pickle
# support in one places makes it easier for other developers to copy
# in new situations.
# (note that these _pickle_support functions also work for deep copying)
def _numpy_ufunc_pickle_support():
"""
Allow instances of numpy.ufunc to pickle.
"""
# Remove this when numpy.ufuncs themselves support pickling.
# Code from Robert Kern; see:
#http://news.gmane.org/find-root.php?group=gmane.comp.python.numeric.general&article=13400
from numpy import ufunc
import copy_reg
def ufunc_pickler(ufunc):
"""Return the ufunc's name"""
return ufunc.__name__
copy_reg.pickle(ufunc,ufunc_pickler)
_numpy_ufunc_pickle_support()
def _mpq_pickle_support():
"""Allow instances of gmpy.mpq to pickle."""
from gmpy import mpq
mpq_type = type(mpq(1,10)) # gmpy doesn't appear to expose the type another way
import copy_reg
copy_reg.pickle(mpq_type,lambda q: (mpq,(q.digits(),)))
def _instance_method_pickle_support():
"""Allow instance methods to pickle."""
# CB: well, it seems to work - maybe there are cases where this
# wouldn't work?
# Alternative technique (totally different approach), but would
# only work with pickle (not cPickle):
# http://code.activestate.com/recipes/572213/
def _pickle_instance_method(mthd):
mthd_name = mthd.im_func.__name__
obj = mthd.im_self
return getattr, (obj,mthd_name)
import copy_reg, types
copy_reg.pickle(types.MethodType, _pickle_instance_method)
_instance_method_pickle_support()
from topo.base.simulation import Simulation
# Set the default value of Simulation.time_type to gmpy.mpq. If gmpy
# is unavailable, use the slower fixedpoint.FixedPoint.
try:
import gmpy
Simulation.time_type = gmpy.mpq
Simulation.time_type_args = ()
_mpq_pickle_support()
except ImportError:
import topo.misc.fixedpoint as fixedpoint
param.Parameterized().warning('gmpy.mpq not available; using slower fixedpoint.FixedPoint for simulation time.')
Simulation.time_type = fixedpoint.FixedPoint
Simulation.time_type_args = (4,) # gives precision=4
# Provide a fake gmpy.mpq (to allow e.g. pickled test data to be
# loaded).
# CEBALERT: can we move this into whatever test needs it? I guess
# it also has to be here to allow snapshots saved using gmpy time
# type to open on systems where gmpy is not available.
from topo.misc.util import gmpyImporter
import sys
sys.meta_path.append(gmpyImporter())
sim = Simulation()
def about(display=True):
"""Print release and licensing information."""
ABOUT_TEXT = """
Pre-release version %s (%s) of Topographica; an updated
version may be available from topographica.org.
This program is free, open-source software available under the BSD
license (http://www.opensource.org/licenses/bsd-license.php).
"""%(release,version)
if display:
print ABOUT_TEXT
else:
return ABOUT_TEXT
# Set most floating-point errors to be fatal for safety; see
# topo/misc/patternfn.py for examples of how to disable
# the exceptions when doing so is safe. Underflow is always
# considered safe; e.g. input patterns can be very small
# at large distances, and when they are scaled by small
# weights underflows are common and not a problem.
from numpy import seterr
old_seterr_settings=seterr(all="raise",under="ignore")
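# For reference, code that knows a particular operation is safe can relax these
# settings locally with numpy.errstate instead of touching the global state
# (topo/misc/patternfn.py shows the project's own approach). The function below
# is only an illustrative sketch and is not used by the package.
def _example_locally_ignore_fp_errors(x, y):
    """Divide two arrays, replacing NaN/inf results with finite values (sketch only)."""
    import numpy
    with numpy.errstate(divide='ignore', invalid='ignore'):
        result = x / y
    return numpy.nan_to_num(result)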
|
|
__authors__ = ['Andrew Taylor']
import random
import time
import pygame
# Python comes with some color conversion methods.
import colorsys
# For Math things, what else
import math
from VisualisationPlugin import VisualisationPlugin
import logging
# Video available here:
# http://www.youtube.com/watch?v=ySJlUu2926A&feature=youtu.be
class SpeedingBlobsVisualisationPlugin(VisualisationPlugin):
logger = logging.getLogger(__name__)
speed_blobs = None
blob_speeds = [500]
def new_random_blob(self, canvas):
blob_entry = {}
# Random Speed, ms/pixel
blob_entry["speed"] = self.blob_speeds[random.randint(0, len(self.blob_speeds) - 1)]
w = canvas.get_width()
h = canvas.get_height()
# Random X location
blob_entry["start_x"] = random.randint(0, w)
# Random Y location
blob_entry["start_y"] = random.randint(0, h)
# Random direction
direction = {}
direction["x"] = random.randint(0, 5) - 2
direction["y"] = random.randint(0, 5) - 2
if (direction["x"] == 0 and direction["y"] == 0):
direction["x"] = 1
blob_entry["direction"] = direction
# Start time
blob_entry["start_time"] = pygame.time.get_ticks()
# Random colour
blob_entry["colour"] = float(random.randint(0, 100)) / 200.0
blob_entry["decay"] = float(random.randint(3, 6))
blob_entry["complete"] = False
return blob_entry
def initial_blob_config(self, canvas):
# Put 5 blobs in
self.speed_blobs = []
for i in range(4):
self.speed_blobs.append(self.new_random_blob(canvas))
return self.speed_blobs
def configure(self, config):
self.config = config
self.logger.info("Config: %s" % config)
self.clock = pygame.time.Clock()
# Example, and following two functions taken from http://www.pygame.org/wiki/RGBColorConversion
# Normalization method, so the colors are in the range [0, 1]
def normalize(self, color):
return color[0] / 255.0, color[1] / 255.0, color[2] / 255.0
# Reformats a color tuple, that uses the range [0, 1] to a 0xFF
# representation.
def reformat(self, color):
return int(round(color[0] * 255)) % 256, \
int(round(color[1] * 255)) % 256, \
int(round(color[2] * 255)) % 256
def draw_frame(self, canvas):
if self.speed_blobs is None:
self.initial_blob_config(canvas)
t = pygame.time.get_ticks()
self.logger.debug("Ticks: %d" % t)
canvas = self.draw_blobs(canvas, self.speed_blobs, t)
# Check to see if we need to replace a blob with a new one
for idx, blob in enumerate(self.speed_blobs):
if blob.get("complete") is True:
self.speed_blobs[idx] = self.new_random_blob(canvas)
# Limit the frame rate
self.clock.tick(25)
return canvas
def draw_splash(self, canvas):
"""
Construct a splash screen suitable to display for a plugin selection menu
"""
test_blobs = []
blob_entry = {}
        # Fixed X location for the splash
        blob_entry["x"] = 2
        # Fixed Y location
        blob_entry["y"] = 2
        # Fixed colour
blob_entry["colour"] = 0.2
blob_entry["height"] = 2
blob_entry["decay"] = 10
test_blobs.append(blob_entry)
blob_entry = {}
        # Fixed X location (canvas centre)
        blob_entry["x"] = (canvas.get_width() - 1) / 2.0
        # Fixed Y location (canvas centre)
        blob_entry["y"] = (canvas.get_height() - 1) / 2.0
        # Fixed colour
blob_entry["colour"] = 0.5
blob_entry["height"] = 0.5
blob_entry["decay"] = 7.0
test_blobs.append(blob_entry)
blob_entry = {}
        # Fixed X location (offset from the centre)
        blob_entry["x"] = (canvas.get_width() - 1) / 2.0 + 5
        # Fixed Y location (canvas centre)
        blob_entry["y"] = (canvas.get_height() - 1) / 2.0
        # Fixed colour
blob_entry["colour"] = 0.5
blob_entry["height"] = 0.5
blob_entry["decay"] = 7.0
test_blobs.append(blob_entry)
# Draw the blobs
canvas = self.draw_blobs(canvas, test_blobs, 0)
return canvas
def draw_blobs(self, canvas, blobs, t):
# Period
t_background_period = 20000
# Fraction of the way through
background_hue = (float(t) / float(t_background_period)) % 1
# Create a blank "sheet"
sheet = [[0 for y in range(canvas.get_height())] for x in range(canvas.get_width())]
# Draw all of the blobs
for blob in blobs:
blob_height = blob["colour"]
# If the blobs are defined as static, then
# draw them where they lie, else calculate
# where they should appear
blob_x = blob.get("x")
blob_y = blob.get("y")
if blob_x is None or blob_y is None:
# try to calculate the blob's position
t_delta = t - blob["start_time"]
# print "%d" % t_delta
squares_to_travel = float(t_delta) / float(blob["speed"])
direction = blob["direction"]
offset = blob["decay"]
x_offset = 0
y_offset = 0
x_delta = 0
y_delta = 0
if (direction["x"] == 0):
x_offset = 0
else:
x_delta = direction["x"] * squares_to_travel - blob["start_x"]
if (direction["x"] < 0):
x_offset = blob["decay"] + canvas.get_width()
if (direction["x"] > 0):
x_offset = -blob["decay"]
if (direction["y"] == 0):
y_offset = 0
else:
y_delta = direction["y"] * squares_to_travel - blob["start_y"]
if (direction["y"] < 0):
y_offset = blob["decay"] + canvas.get_height()
if (direction["y"] > 0):
y_offset = -blob["decay"]
# print "x_dir %d x_offset %d , y_dir %d y_offset %d" % (direction["x"], x_offset, direction["y"], y_offset)
blob_x = blob["start_x"] + x_delta + x_offset
blob_y = blob["start_y"] + y_delta + y_offset
if (direction["x"] > 0):
if (blob_x > canvas.get_width() + blob["decay"]):
blob["complete"] = True
else:
if (blob_x < 0 - blob["decay"]):
blob["complete"] = True
if (direction["y"] > 0):
if (blob_y > canvas.get_height() + blob["decay"]):
blob["complete"] = True
else:
if (blob_y < 0 - blob["decay"]):
blob["complete"] = True
            # The central pixel should remain the correct colour at all times.
            # A problem occurs when the background colour 'overtakes' the blob
            # colour: the bg hue is in [0, 1] and the blob hue is, say, 0.5.
            # For blob hue > bg hue it is straightforward: the hue gradually
            # decreases until it meets the bg hue value (according to the
            # appropriate drop-off formula).
            # For bg hue > blob hue the decay goes in the other direction, with
            # a negative delta, and the hue should actually be increased up to
            # the bg hue value.
            # But what happens when the bg hue wraps? The bg hue wraps from 1
            # back to 0, and the decay that previously may have gone through the
            # green end of the spectrum now has to go through blue according to
            # the formula above.
            # If we think of the canvas as a sheet, and the blobs pinch the sheet
            # up (like the general-relativity rubber-sheet analogy, but the other
            # way up), then it doesn't matter that the numbers wrap: we just apply
            # a height-map colour, with the bottom (background) hue varying over time.
for x in range(canvas.get_width()):
for y in range(canvas.get_height()):
# Calculate how far away from the centre of the blob the centre of this pixel is
x_d = x - blob_x
y_d = y - blob_y
distance_away = math.sqrt(x_d * x_d + y_d * y_d)
decay = blob["decay"]
# Only draw pixels in the decay zone
if (distance_away < decay):
# Calculate the scaling factor
decay_amount = (math.cos(math.pi * distance_away / decay) + 1.0) / 2.0
# This compounds any blobs on top of each other automatically
sheet[x][y] += (blob_height * decay_amount)
# Now translate the sheet height into colours
for x in range(canvas.get_width()):
for y in range(canvas.get_height()):
hue = background_hue + sheet[x][y]
rgb_colour = self.reformat(colorsys.hsv_to_rgb(hue, 1.0, 1.0))
canvas.set_pixel(x, y, rgb_colour)
return canvas
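# The falloff used in draw_blobs() above is a raised cosine: a pixel at the
# blob centre gets the full blob height, and the contribution fades smoothly
# to zero at `decay` pixels away. The helper below reproduces just that curve
# so it can be inspected in isolation; it is illustrative only and is not
# called by the plugin itself.
def _cosine_falloff(distance_away, decay):
    """Return the [0, 1] scaling factor for a pixel `distance_away` from a blob centre."""
    if distance_away >= decay:
        return 0.0
    return (math.cos(math.pi * distance_away / decay) + 1.0) / 2.0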
|
|
from __future__ import absolute_import
import logging
from enum import Enum as NativeEnum, IntEnum as NativeIntEnum
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union, cast
import six
try:
from django.utils.functional import classproperty # type: ignore
except ImportError:
# Pre-Django 3.1
from django.utils.decorators import classproperty
from django_enumfield.db.fields import EnumField
__all__ = ("Enum", "EnumField")
logger = logging.getLogger(__name__)
RAISE = object()
class BlankEnum(NativeEnum):
BLANK = ""
@property
def label(self):
return ""
def classdispatcher(class_method):
class _classdispatcher(object):
def __init__(self, method=None):
self.fget = method
def __get__(self, instance, cls=None):
if instance is None:
return getattr(cls, class_method)
return self.fget(instance)
return _classdispatcher
Default = TypeVar("Default")
T = TypeVar("T", bound="Enum")
@six.python_2_unicode_compatible
class Enum(NativeIntEnum):
""" A container for holding and restoring enum values """
__labels__ = {} # type: Dict[int, six.text_type]
__default__ = None # type: Optional[int]
__transitions__ = {} # type: Dict[int, Sequence[int]]
def __str__(self):
return self.label
@classdispatcher("get_name")
def name(self):
# type: () -> str
return self._name_
@classdispatcher("get_label")
def label(self):
# type: () -> str
"""Get human readable label for the matching Enum.Value.
:return: label for value
:rtype: str
"""
label = cast(str, self.__class__.__labels__.get(self.value, self.name))
return six.text_type(label)
@classproperty
def do_not_call_in_templates(cls):
# type: () -> bool
# Fix for Django templates so that any lookups of enums won't fail
# More info: https://stackoverflow.com/questions/35953132/how-to-access-enum-types-in-django-templates # noqa: E501
return True
@classproperty
def values(cls): # type: ignore
return {member.value: member for member in cls}
def deconstruct(self):
"""
See "Adding a deconstruct() method" in
https://docs.djangoproject.com/en/1.8/topics/migrations/
"""
c = self.__class__
path = "{}.{}".format(c.__module__, c.__name__)
return path, [self.value], {}
@classmethod
def items(cls):
# type: () -> List[Tuple[str, int]]
"""
:return: List of tuples consisting of every enum value in the form
[('NAME', value), ...]
"""
items = [(member.name, member.value) for member in cls]
return sorted(items, key=lambda x: x[1])
@classmethod
def choices(cls, blank=False):
# type: (bool) -> List[Tuple[Union[int, str], NativeEnum]]
"""Choices for Enum
:return: List of tuples (<value>, <member>)
"""
choices = sorted(
[(member.value, member) for member in cls], key=lambda x: x[0]
) # type: List[Tuple[Union[str, int], NativeEnum]]
if blank:
choices.insert(0, (BlankEnum.BLANK.value, BlankEnum.BLANK))
return choices
@classmethod
def default(cls):
# type: () -> Optional[Enum]
"""Default Enum value. Set default value to `__default__` attribute
of your enum class or override this method if you need another
default value.
Usage:
IntegerField(choices=my_enum.choices(), default=my_enum.default(), ...
:return Default value, if set.
"""
if cls.__default__ is not None:
return cast(Enum, cls(cls.__default__))
return None
@classmethod
def field(cls, **kwargs):
# type: (Any) -> EnumField
"""A shortcut for field declaration
Usage:
class MyModelStatuses(Enum):
UNKNOWN = 0
class MyModel(Model):
status = MyModelStatuses.field()
:param kwargs: Arguments passed in EnumField.__init__()
:rtype: EnumField
"""
return EnumField(cls, **kwargs)
@classmethod
def get(
cls,
name_or_numeric, # type: Union[str, int, T]
default=None, # type: Optional[Default]
):
# type: (...) -> Union[Enum, Optional[Default]]
"""Get Enum.Value object matching the value argument.
:param name_or_numeric: Integer value or attribute name
:param default: The default to return if the value passed is not
a valid enum value
"""
if isinstance(name_or_numeric, cls):
return name_or_numeric
if isinstance(name_or_numeric, int):
try:
return cls(name_or_numeric)
except ValueError:
pass
elif isinstance(name_or_numeric, six.string_types):
try:
return cls[name_or_numeric]
except KeyError:
pass
return default
@classmethod
def get_name(cls, name_or_numeric):
# type: (Union[str, int, T]) -> Optional[str]
"""Get Enum.Value name matching the value argument.
:param name_or_numeric: Integer value or attribute name
:return: The name or None if not found
"""
value = cls.get(name_or_numeric)
if value is not None:
return value.name
return None
@classmethod
def get_label(cls, name_or_numeric):
# type: (Union[str, int, Enum]) -> Optional[str]
"""Get Enum.Value label matching the value argument.
:param name_or_numeric: Integer value or attribute name
:return: The label or None if not found
"""
value = cls.get(name_or_numeric)
if value is not None:
return value.label
return None
@classmethod
def is_valid_transition(cls, from_value, to_value):
# type: (Union[int, Enum], Union[int, Enum]) -> bool
"""Will check if to_value is a valid transition from from_value.
Returns true if it is a valid transition.
:param from_value: Start transition point
:param to_value: End transition point
:return: Success flag
"""
if isinstance(from_value, cls):
from_value = from_value.value
if isinstance(to_value, cls):
to_value = to_value.value
return (
from_value == to_value
or not cls.__transitions__
or (from_value in cls.transition_origins(to_value))
)
@classmethod
def transition_origins(cls, to_value):
# type: (Union[int, T]) -> Sequence[int]
"""Returns all values the to_value can make a transition from.
        :param to_value: End transition point
"""
if isinstance(to_value, cls):
to_value = to_value.value
return cast(Sequence[int], cls.__transitions__.get(to_value, []))
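# Illustrative example (not used by this module): a concrete Enum subclass that
# wires together __labels__, __default__ and __transitions__ in the way the
# classmethods above expect. The names here are hypothetical.
class _ExampleStatus(Enum):
    DRAFT = 0
    PUBLISHED = 1
    ARCHIVED = 2
    __default__ = DRAFT
    __labels__ = {0: "Draft", 1: "Published", 2: "Archived"}
    # PUBLISHED may only be reached from DRAFT, ARCHIVED only from PUBLISHED.
    __transitions__ = {1: (0,), 2: (1,)}
# e.g. _ExampleStatus.choices() -> [(0, DRAFT), (1, PUBLISHED), (2, ARCHIVED)]
#      _ExampleStatus.default() -> _ExampleStatus.DRAFT
#      _ExampleStatus.is_valid_transition(0, 2) -> False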
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Gradient-based attribution."""
from typing import cast, List, Text, Optional
from absl import logging
from lit_nlp.api import components as lit_components
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import dtypes
from lit_nlp.api import model as lit_model
from lit_nlp.api import types
from lit_nlp.components.citrus import utils as citrus_utils
from lit_nlp.lib import utils
import numpy as np
JsonDict = types.JsonDict
Spec = types.Spec
CLASS_KEY = 'Class to explain'
NORMALIZATION_KEY = 'Normalize'
INTERPOLATION_KEY = 'Interpolation steps'
class GradientNorm(lit_components.Interpreter):
"""Salience map from gradient L2 norm."""
def find_fields(self, output_spec: Spec) -> List[Text]:
# Find TokenGradients fields
grad_fields = utils.find_spec_keys(output_spec, types.TokenGradients)
# Check that these are aligned to Tokens fields
for f in grad_fields:
tokens_field = output_spec[f].align # pytype: disable=attribute-error
assert tokens_field in output_spec
assert isinstance(output_spec[tokens_field], types.Tokens)
return grad_fields
def _interpret(self, grads: np.ndarray, tokens: np.ndarray):
assert grads.shape[0] == len(tokens)
# Norm of dy/d(embs)
grad_norm = np.linalg.norm(grads, axis=1)
grad_norm /= np.sum(grad_norm)
# <float32>[num_tokens]
return grad_norm
def run(self,
inputs: List[JsonDict],
model: lit_model.Model,
dataset: lit_dataset.Dataset,
model_outputs: Optional[List[JsonDict]] = None,
config: Optional[JsonDict] = None) -> Optional[List[JsonDict]]:
"""Run this component, given a model and input(s)."""
# Find gradient fields to interpret
output_spec = model.output_spec()
grad_fields = self.find_fields(output_spec)
logging.info('Found fields for gradient attribution: %s', str(grad_fields))
if len(grad_fields) == 0: # pylint: disable=g-explicit-length-test
return None
# Run model, if needed.
if model_outputs is None:
model_outputs = list(model.predict(inputs))
assert len(model_outputs) == len(inputs)
all_results = []
for o in model_outputs:
# Dict[field name -> interpretations]
result = {}
for grad_field in grad_fields:
token_field = cast(types.TokenGradients, output_spec[grad_field]).align
tokens = o[token_field]
scores = self._interpret(o[grad_field], tokens)
result[grad_field] = dtypes.TokenSalience(tokens, scores)
all_results.append(result)
return all_results
def is_compatible(self, model: lit_model.Model):
compatible_fields = self.find_fields(model.output_spec())
return len(compatible_fields)
def meta_spec(self) -> types.Spec:
return {'saliency': types.TokenSalience(autorun=True, signed=False)}
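# A toy numeric illustration (not used by LIT) of the scoring performed by
# GradientNorm above, on made-up gradient values:
def _gradient_norm_example():
  grads = np.array([[3.0, 4.0], [0.0, 1.0]])  # <float32>[num_tokens, emb_dim]
  grad_norm = np.linalg.norm(grads, axis=1)   # [5.0, 1.0]
  grad_norm /= np.sum(grad_norm)              # [0.8333, 0.1667]: per-token salience
  return grad_norm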
class GradientDotInput(lit_components.Interpreter):
"""Salience map using the values of gradient * input as attribution."""
def find_fields(self, input_spec: Spec, output_spec: Spec) -> List[Text]:
# Find TokenGradients fields
grad_fields = utils.find_spec_keys(output_spec, types.TokenGradients)
# Check that these are aligned to Tokens fields
aligned_fields = []
for f in grad_fields:
tokens_field = output_spec[f].align # pytype: disable=attribute-error
assert tokens_field in output_spec
assert isinstance(output_spec[tokens_field], types.Tokens)
embeddings_field = output_spec[f].grad_for
if embeddings_field is not None:
assert embeddings_field in input_spec
assert isinstance(input_spec[embeddings_field], types.TokenEmbeddings)
assert embeddings_field in output_spec
assert isinstance(output_spec[embeddings_field], types.TokenEmbeddings)
aligned_fields.append(f)
else:
logging.info('Skipping %s since embeddings field not found.', str(f))
return aligned_fields
def _interpret(self, grads: np.ndarray, embs: np.ndarray):
assert grads.shape == embs.shape
# dot product of gradients and embeddings
# <float32>[num_tokens]
grad_dot_input = np.sum(grads * embs, axis=-1)
scores = citrus_utils.normalize_scores(grad_dot_input)
return scores
def run(self,
inputs: List[JsonDict],
model: lit_model.Model,
dataset: lit_dataset.Dataset,
model_outputs: Optional[List[JsonDict]] = None,
config: Optional[JsonDict] = None) -> Optional[List[JsonDict]]:
"""Run this component, given a model and input(s)."""
# Find gradient fields to interpret
input_spec = model.input_spec()
output_spec = model.output_spec()
grad_fields = self.find_fields(input_spec, output_spec)
logging.info('Found fields for gradient attribution: %s', str(grad_fields))
if len(grad_fields) == 0: # pylint: disable=g-explicit-length-test
return None
# Run model, if needed.
if model_outputs is None:
model_outputs = list(model.predict(inputs))
assert len(model_outputs) == len(inputs)
all_results = []
for o in model_outputs:
# Dict[field name -> interpretations]
result = {}
for grad_field in grad_fields:
embeddings_field = cast(types.TokenGradients,
output_spec[grad_field]).grad_for
scores = self._interpret(o[grad_field], o[embeddings_field])
token_field = cast(types.TokenGradients, output_spec[grad_field]).align
tokens = o[token_field]
result[grad_field] = dtypes.TokenSalience(tokens, scores)
all_results.append(result)
return all_results
def is_compatible(self, model: lit_model.Model):
compatible_fields = self.find_fields(
model.input_spec(), model.output_spec())
return len(compatible_fields)
def meta_spec(self) -> types.Spec:
return {'saliency': types.TokenSalience(autorun=True, signed=True)}
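# A self-contained sketch (not part of LIT) of the Integrated Gradients
# estimate implemented by the IntegratedGradients interpreter below: it
# interpolates from a zero baseline to the input, averages gradients along the
# path with the trapezoid rule, and scales by (input - baseline). `grad_fn` is
# a hypothetical callable mapping embeddings to gradients of the same shape.
def _integrated_gradients_example(grad_fn, embs, num_steps=30):
  baseline = np.zeros_like(embs)
  # <float32>[num_steps + 1, 1, 1] scales along the straight-line path.
  scales = np.linspace(0, 1, num_steps + 1)[:, np.newaxis, np.newaxis]
  path = baseline + scales * (embs - baseline)
  path_grads = np.stack([grad_fn(step) for step in path])
  # Trapezoid rule: average adjacent gradients, then average over the path.
  avg_grads = np.average((path_grads[:-1] + path_grads[1:]) / 2, axis=0)
  # <float32>[num_tokens] attribution per token.
  return np.sum((embs - baseline) * avg_grads, axis=-1)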
class IntegratedGradients(lit_components.Interpreter):
"""Salience map from Integrated Gradients.
Integrated Gradients is an attribution method originally proposed in
Sundararajan et al. (https://arxiv.org/abs/1703.01365), which attributes an
importance value for each input feature based on the gradients of the model
output with respect to the input. The feature attribution values are
calculated by taking the integral of gradients along a straight path from a
baseline to the input being analyzed. The original implementation can be
found at: https://github.com/ankurtaly/Integrated-Gradients/blob/master/
BertModel/bert_model_utils.py
  This component requires the following fields in the model spec. Field
names like `embs` are placeholders; you can call them whatever you like,
and as with other LIT components multiple segments are supported.
Output:
- TokenEmbeddings (`embs`) to return the input embeddings
- TokenGradients (`grads`) to return gradients w.r.t. `embs`
- A label field (`target`) to return the label that `grads`
was computed for. This is usually a CategoryLabel, but can be anything
since it will just be fed back into the model.
  Input:
    - TokenEmbeddings (`embs`) to accept the modified input embeddings
    - A label field (`target`) to pin the gradient target to the same
label for all integral steps, since the argmax prediction may change.
"""
def find_fields(self, input_spec: Spec, output_spec: Spec) -> List[Text]:
# Find TokenGradients fields
grad_fields = utils.find_spec_keys(output_spec, types.TokenGradients)
# Check that these are aligned to Tokens fields
aligned_fields = []
for f in grad_fields:
tokens_field = output_spec[f].align # pytype: disable=attribute-error
# Skips this grad field if an aligned token field isn't specified.
if tokens_field is None:
continue
assert tokens_field in output_spec
assert isinstance(output_spec[tokens_field], types.Tokens)
embeddings_field = output_spec[f].grad_for
grad_class_key = output_spec[f].grad_target_field_key
if embeddings_field is not None and grad_class_key is not None:
assert embeddings_field in input_spec
assert isinstance(input_spec[embeddings_field], types.TokenEmbeddings)
assert embeddings_field in output_spec
assert isinstance(output_spec[embeddings_field], types.TokenEmbeddings)
assert grad_class_key in input_spec
assert grad_class_key in output_spec
aligned_fields.append(f)
else:
logging.info('Skipping %s since embeddings field not found.', str(f))
return aligned_fields
def get_interpolated_inputs(self, baseline: np.ndarray, target: np.ndarray,
num_steps: int) -> np.ndarray:
"""Gets num_step linearly interpolated inputs from baseline to target."""
if num_steps <= 0: return np.array([])
if num_steps == 1: return np.array([baseline, target])
delta = target - baseline # <float32>[num_tokens, emb_size]
# Creates a scales array where scales[i] is the ith interpolation
# coefficient from np.linspace(0, 1, num_steps + 1).
# <float32>[num_steps + 1, 1, 1]
scales = np.linspace(0, 1, num_steps + 1,
dtype=np.float32)[:, np.newaxis, np.newaxis]
shape = (num_steps + 1,) + delta.shape
# <float32>[num_steps + 1, num_tokens, emb_size]
deltas = scales * np.broadcast_to(delta, shape)
interpolated_inputs = baseline + deltas
return interpolated_inputs # <float32>[num_steps + 1, num_tokens, emb_size]
def estimate_integral(self, path_gradients: np.ndarray) -> np.ndarray:
"""Estimates the integral of the path_gradients using trapezoid rule."""
path_gradients = (path_gradients[:-1] + path_gradients[1:]) / 2
# There are num_steps elements in the path_gradients. Summing num_steps - 1
# terms and dividing by num_steps - 1 is equivalent to taking
# the average.
return np.average(path_gradients, axis=0)
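# Worked example of the trapezoid estimate above: for path gradients
# g0, g1, g2, g3 sampled along the interpolation path, this returns
# ((g0 + g1)/2 + (g1 + g2)/2 + (g2 + g3)/2) / 3, i.e. the mean of the
# midpoints of consecutive samples, computed elementwise per token/dimension.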
def get_baseline(self, embeddings: np.ndarray) -> np.ndarray:
"""Returns baseline embeddings to use in Integrated Gradients."""
# Replaces embeddings in the original input with the zero embedding, or
# with the specified token embedding.
baseline = np.zeros_like(embeddings)
# TODO(ellenj): Add option to use a token's embedding as the baseline.
return baseline
def get_salience_result(self, model_input: JsonDict, model: lit_model.Model,
interpolation_steps: int, normalize: bool,
class_to_explain: str, model_output: JsonDict,
grad_fields: List[Text]):
result = {}
output_spec = model.output_spec()
# We ensure that the embedding and gradient class fields are present in the
# model's input spec in find_fields().
embeddings_fields = [
cast(types.TokenGradients,
output_spec[grad_field]).grad_for for grad_field in grad_fields]
# The gradient class input is used to specify the target class of the
# gradient calculation (if unspecified, this option defaults to the argmax,
# which could flip between interpolated inputs).
# If class_to_explain is emptystring, then explain the argmax class.
grad_class_key = cast(types.TokenGradients,
output_spec[grad_fields[0]]).grad_target_field_key
if class_to_explain == '': # pylint: disable=g-explicit-bool-comparison
grad_class = model_output[grad_class_key]
else:
grad_class = class_to_explain
interpolated_inputs = {}
all_embeddings = []
all_baselines = []
for embed_field in embeddings_fields:
# <float32>[num_tokens, emb_size]
embeddings = np.array(model_output[embed_field])
all_embeddings.append(embeddings)
# Starts with baseline of zeros. <float32>[num_tokens, emb_size]
baseline = self.get_baseline(embeddings)
all_baselines.append(baseline)
# Get interpolated inputs from baseline to original embedding.
# <float32>[interpolation_steps, num_tokens, emb_size]
interpolated_inputs[embed_field] = self.get_interpolated_inputs(
baseline, embeddings, interpolation_steps)
# Create model inputs and populate embedding field(s).
inputs_with_embeds = []
for i in range(interpolation_steps):
input_copy = model_input.copy()
# Interpolates embeddings for all inputs simultaneously.
for embed_field in embeddings_fields:
# <float32>[num_tokens, emb_size]
input_copy[embed_field] = interpolated_inputs[embed_field][i]
input_copy[grad_class_key] = grad_class
inputs_with_embeds.append(input_copy)
embed_outputs = model.predict(inputs_with_embeds)
# Create list with concatenated gradients for each interpolate input.
gradients = []
for o in embed_outputs:
# <float32>[total_num_tokens, emb_size]
interp_gradients = np.concatenate([o[field] for field in grad_fields])
gradients.append(interp_gradients)
# <float32>[interpolation_steps, total_num_tokens, emb_size]
path_gradients = np.stack(gradients, axis=0)
# Calculate integral
# <float32>[total_num_tokens, emb_size]
integral = self.estimate_integral(path_gradients)
# <float32>[total_num_tokens, emb_size]
concat_embeddings = np.concatenate(all_embeddings)
# <float32>[total_num_tokens, emb_size]
concat_baseline = np.concatenate(all_baselines)
# <float32>[total_num_tokens, emb_size]
integrated_gradients = integral * (np.array(concat_embeddings) -
np.array(concat_baseline))
# Dot product of integral values and (embeddings - baseline).
# <float32>[total_num_tokens]
attributions = np.sum(integrated_gradients, axis=-1)
# <float32>[total_num_tokens]
scores = citrus_utils.normalize_scores(
attributions) if normalize else attributions
for grad_field in grad_fields:
# Format as salience map result.
token_field = cast(types.TokenGradients, output_spec[grad_field]).align
tokens = model_output[token_field]
# Only use the scores that correspond to the tokens in this grad_field.
# The gradients for all input embeddings were concatenated in the order
# of the grad fields, so they can be sliced out in the same order.
sliced_scores = scores[:len(tokens)] # <float32>[num_tokens in field]
scores = scores[len(tokens):] # <float32>[num_remaining_tokens]
assert len(tokens) == len(sliced_scores)
result[grad_field] = dtypes.TokenSalience(tokens, sliced_scores)
return result
def run(self,
inputs: List[JsonDict],
model: lit_model.Model,
dataset: lit_dataset.Dataset,
model_outputs: Optional[List[JsonDict]] = None,
config: Optional[JsonDict] = None) -> Optional[List[JsonDict]]:
"""Run this component, given a model and input(s)."""
config = config or {}
class_to_explain = config.get(CLASS_KEY,
self.config_spec()[CLASS_KEY].default)
interpolation_steps = int(
config.get(INTERPOLATION_KEY,
self.config_spec()[INTERPOLATION_KEY].default))
normalization = config.get(NORMALIZATION_KEY,
self.config_spec()[NORMALIZATION_KEY].default)
# Find gradient fields to interpret
input_spec = model.input_spec()
output_spec = model.output_spec()
grad_fields = self.find_fields(input_spec, output_spec)
logging.info('Found fields for integrated gradients: %s', str(grad_fields))
if len(grad_fields) == 0: # pylint: disable=g-explicit-length-test
return None
# Run model, if needed.
if model_outputs is None:
model_outputs = list(model.predict(inputs))
all_results = []
for model_output, model_input in zip(model_outputs, inputs):
result = self.get_salience_result(model_input, model, interpolation_steps,
normalization, class_to_explain,
model_output, grad_fields)
all_results.append(result)
return all_results
def is_compatible(self, model: lit_model.Model):
compatible_fields = self.find_fields(
model.input_spec(), model.output_spec())
return len(compatible_fields)
def config_spec(self) -> types.Spec:
return {
CLASS_KEY:
types.TextSegment(default=''),
NORMALIZATION_KEY:
types.Boolean(default=True),
INTERPOLATION_KEY:
types.Scalar(min_val=5, max_val=100, default=30, step=1)
}
def meta_spec(self) -> types.Spec:
return {'saliency': types.TokenSalience(autorun=False, signed=True)}
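# A compact, self-contained sketch of the Integrated Gradients arithmetic used
# above: interpolate from a zero baseline, average the path gradients with the
# trapezoid rule, multiply by (input - baseline), and sum over the embedding
# axis. The gradient function is a synthetic stand-in (an assumption) for the
# gradients a real model would return via predict().
def _integrated_gradients_sketch(num_steps=30):
    import numpy as np
    rng = np.random.default_rng(0)
    embeddings = rng.normal(size=(5, 16)).astype(np.float32)  # [num_tokens, emb_size]
    baseline = np.zeros_like(embeddings)
    # [num_steps + 1, num_tokens, emb_size], as in get_interpolated_inputs().
    scales = np.linspace(0, 1, num_steps + 1, dtype=np.float32)[:, np.newaxis, np.newaxis]
    interpolated = baseline + scales * (embeddings - baseline)
    # Hypothetical gradients of a scalar output w.r.t. each interpolated input.
    weights = rng.normal(size=embeddings.shape).astype(np.float32)
    path_gradients = np.stack(
        [weights * (1.0 + float(x.sum())) for x in interpolated], axis=0)
    # Trapezoid-rule estimate of the integral, as in estimate_integral().
    integral = np.average((path_gradients[:-1] + path_gradients[1:]) / 2, axis=0)
    # Dot product with (input - baseline): <float32>[num_tokens]
    return np.sum(integral * (embeddings - baseline), axis=-1)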
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._subscriptions_operations import build_check_zone_peers_request, build_get_request, build_list_locations_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionsOperations:
"""SubscriptionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.subscriptions.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_locations(
self,
subscription_id: str,
**kwargs: Any
) -> AsyncIterable["_models.LocationListResult"]:
"""Gets all available geo-locations.
This operation provides all the locations that are available for resource providers; however,
each resource provider may support a subset of this list.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocationListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2018_06_01.models.LocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_locations_request(
subscription_id=subscription_id,
template_url=self.list_locations.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_locations_request(
subscription_id=subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("LocationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_locations.metadata = {'url': '/subscriptions/{subscriptionId}/locations'} # type: ignore
@distributed_trace_async
async def get(
self,
subscription_id: str,
**kwargs: Any
) -> "_models.Subscription":
"""Gets details about a specified subscription.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subscription, or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.Subscription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subscription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subscription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}'} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.SubscriptionListResult"]:
"""Gets all subscriptions for a tenant.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubscriptionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2018_06_01.models.SubscriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubscriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SubscriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions'} # type: ignore
@distributed_trace_async
async def check_zone_peers(
self,
subscription_id: str,
parameters: "_models.CheckZonePeersRequest",
**kwargs: Any
) -> "_models.CheckZonePeersResult":
"""Compares a subscriptions logical zone mapping.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param parameters: Parameters for checking zone peers.
:type parameters: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.CheckZonePeersRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckZonePeersResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.CheckZonePeersResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckZonePeersResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'CheckZonePeersRequest')
request = build_check_zone_peers_request(
subscription_id=subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_zone_peers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckZonePeersResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_zone_peers.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Resources/checkZonePeers/'} # type: ignore
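# A minimal usage sketch (not part of the generated client): how a caller might
# drive the async paging and GET operations defined above. The
# SubscriptionClient / DefaultAzureCredential wiring reflects the usual
# azure-mgmt-resource and azure-identity setup and is an assumption here, not
# something this module provides.
async def _example_list_subscriptions_and_locations():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.resource.subscriptions.aio import SubscriptionClient
    credential = DefaultAzureCredential()
    async with SubscriptionClient(credential) as client:
        async for sub in client.subscriptions.list():  # AsyncItemPaged
            details = await client.subscriptions.get(sub.subscription_id)
            async for loc in client.subscriptions.list_locations(sub.subscription_id):
                print(details.display_name, loc.name)
    await credential.close()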
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
##############################################################################
A module for computing different types of DNA descriptors.
You can freely use and distribute it. If you have any problems,
please contact us.
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.14
Email: [email protected] and [email protected]
##############################################################################
"""
# First party modules
from PyBioMed.PyDNA.PyDNApsenacutil import ExtendPhycheIndex
from PyBioMed.PyDNA.PyDNAutil import GetData
def CheckPsenac(lamada, w, k):
"""Check the validation of parameter lamada, w and k.
"""
try:
if not isinstance(lamada, int) or lamada <= 0:
raise ValueError(
"Error, parameter lamada must be an int type and larger than and equal to 0."
)
elif w > 1 or w < 0:
raise ValueError("Error, parameter w must be ranged from 0 to 1.")
elif not isinstance(k, int) or k <= 0:
raise ValueError(
"Error, parameter k must be an int type and larger than 0."
)
except ValueError:
raise
def GetSequenceListAndPhycheValuePsednc(input_data, extra_phyche_index=None):
"""For PseDNC, PseKNC, make sequence_list and phyche_value.
:param input_data: file type or handle.
:param extra_phyche_index: dict, the key is the dinucleotide (string),
the value is its physicochemical property value (list).
It means the user-defined physicochemical indices.
"""
if extra_phyche_index is None:
extra_phyche_index = {}
original_phyche_value = {
"AA": [0.06, 0.5, 0.27, 1.59, 0.11, -0.11],
"AC": [1.50, 0.50, 0.80, 0.13, 1.29, 1.04],
"AG": [0.78, 0.36, 0.09, 0.68, -0.24, -0.62],
"AT": [1.07, 0.22, 0.62, -1.02, 2.51, 1.17],
"CA": [-1.38, -1.36, -0.27, -0.86, -0.62, -1.25],
"CC": [0.06, 1.08, 0.09, 0.56, -0.82, 0.24],
"CG": [-1.66, -1.22, -0.44, -0.82, -0.29, -1.39],
"CT": [0.78, 0.36, 0.09, 0.68, -0.24, -0.62],
"GA": [-0.08, 0.5, 0.27, 0.13, -0.39, 0.71],
"GC": [-0.08, 0.22, 1.33, -0.35, 0.65, 1.59],
"GG": [0.06, 1.08, 0.09, 0.56, -0.82, 0.24],
"GT": [1.50, 0.50, 0.80, 0.13, 1.29, 1.04],
"TA": [-1.23, -2.37, -0.44, -2.24, -1.51, -1.39],
"TC": [-0.08, 0.5, 0.27, 0.13, -0.39, 0.71],
"TG": [-1.38, -1.36, -0.27, -0.86, -0.62, -1.25],
"TT": [0.06, 0.5, 0.27, 1.59, 0.11, -0.11],
}
sequence_list = GetData(input_data)
phyche_value = ExtendPhycheIndex(original_phyche_value, extra_phyche_index)
return sequence_list, phyche_value
def GetSequenceListAndPhycheValuePseknc(input_data, extra_phyche_index=None):
"""For PseDNC, PseKNC, make sequence_list and phyche_value.
:param input_data: file type or handle.
:param extra_phyche_index: dict, the key is the dinucleotide (string),
the value is its physicochemical property value (list).
It means the user-defined physicochemical indices.
"""
if extra_phyche_index is None:
extra_phyche_index = {}
original_phyche_value = {
"AA": [0.06, 0.5, 0.09, 1.59, 0.11, -0.11],
"AC": [1.5, 0.5, 1.19, 0.13, 1.29, 1.04],
"GT": [1.5, 0.5, 1.19, 0.13, 1.29, 1.04],
"AG": [0.78, 0.36, -0.28, 0.68, -0.24, -0.62],
"CC": [0.06, 1.08, -0.28, 0.56, -0.82, 0.24],
"CA": [-1.38, -1.36, -1.01, -0.86, -0.62, -1.25],
"CG": [-1.66, -1.22, -1.38, -0.82, -0.29, -1.39],
"TT": [0.06, 0.5, 0.09, 1.59, 0.11, -0.11],
"GG": [0.06, 1.08, -0.28, 0.56, -0.82, 0.24],
"GC": [-0.08, 0.22, 2.3, -0.35, 0.65, 1.59],
"AT": [1.07, 0.22, 0.83, -1.02, 2.51, 1.17],
"GA": [-0.08, 0.5, 0.09, 0.13, -0.39, 0.71],
"TG": [-1.38, -1.36, -1.01, -0.86, -0.62, -1.25],
"TA": [-1.23, -2.37, -1.38, -2.24, -1.51, -1.39],
"TC": [-0.08, 0.5, 0.09, 0.13, -0.39, 0.71],
"CT": [0.78, 0.36, -0.28, 0.68, -0.24, -0.62],
}
sequence_list = GetData(input_data)
phyche_value = ExtendPhycheIndex(original_phyche_value, extra_phyche_index)
return sequence_list, phyche_value
def GetSequenceListAndPhycheValue(
input_data, k, phyche_index, extra_phyche_index, all_property
):
"""For PseKNC-general make sequence_list and phyche_value.
:param input_data: file type or handle.
:param k: int, the value of k-tuple.
:param phyche_index: physicochemical properties list.
:param extra_phyche_index: dict, the key is the dinucleotide (string),
the value is its physicochemical property value (list).
It means the user-defined physicochemical indices.
:param all_property: bool, choose all physicochemical properties or not.
"""
if phyche_index is None:
phyche_index = []
if extra_phyche_index is None:
extra_phyche_index = {}
diphyche_list = [
"Base stacking",
"Protein induced deformability",
"B-DNA twist",
"Dinucleotide GC Content",
"A-philicity",
"Propeller twist",
"Duplex stability:(freeenergy)",
"Duplex tability(disruptenergy)",
"DNA denaturation",
"Bending stiffness",
"Protein DNA twist",
"Stabilising energy of Z-DNA",
"Aida_BA_transition",
"Breslauer_dG",
"Breslauer_dH",
"Breslauer_dS",
"Electron_interaction",
"Hartman_trans_free_energy",
"Helix-Coil_transition",
"Ivanov_BA_transition",
"Lisser_BZ_transition",
"Polar_interaction",
"SantaLucia_dG",
"SantaLucia_dH",
"SantaLucia_dS",
"Sarai_flexibility",
"Stability",
"Stacking_energy",
"Sugimoto_dG",
"Sugimoto_dH",
"Sugimoto_dS",
"Watson-Crick_interaction",
"Twist",
"Tilt",
"Roll",
"Shift",
"Slide",
"Rise",
]
triphyche_list = [
"Dnase I",
"Bendability (DNAse)",
"Bendability (consensus)",
"Trinucleotide GC Content",
"Nucleosome positioning",
"Consensus_roll",
"Consensus-Rigid",
"Dnase I-Rigid",
"MW-Daltons",
"MW-kg",
"Nucleosome",
"Nucleosome-Rigid",
]
# Set and check physicochemical properties.
phyche_list = []
if k == 2:
phyche_list = diphyche_list
elif k == 3:
phyche_list = triphyche_list
try:
if all_property is True:
phyche_index = phyche_list
else:
for e in phyche_index:
if e not in phyche_list:
error_info = (
"Sorry, the physicochemical properties " + e + " is not exit."
)
raise NameError(error_info)
except NameError:
raise
# Generate phyche_value and sequence_list.
from PyBioMed.PyDNA.PyDNApsenacutil import GetPhycheIndex
phyche_value = ExtendPhycheIndex(
GetPhycheIndex(k, phyche_index), extra_phyche_index
)
sequence_list = GetData(input_data)
return sequence_list, phyche_value
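# Illustrative behavior: with k=2 the allowed names come from diphyche_list
# (e.g. "Twist", "Tilt"), with k=3 from triphyche_list (e.g. "Dnase I",
# "Nucleosome"); passing all_property=True selects the whole list for that k.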
def GetPseDNC(input_data, **kwargs):
"""Make PseDNC dictionary.
:param input_data: file type or handle.
:param k: k-tuple.
:param extra_phyche_index: dict, the key is the dinucleotide (string),
the value is its physicochemical property value (list).
It means the user-defined physicochemical indices.
"""
if "lamada" in kwargs:
lamada = kwargs["lamada"]
else:
lamada = 3
if "w" in kwargs:
w = kwargs["w"]
else:
w = 0.05
if "k" in kwargs:
k = kwargs["k"]
else:
k = 2
if "extra_phyche_index" in kwargs:
kwargs = kwargs["extra_phyche_index"]
else:
extra_phyche_index = None
input_data = [input_data]
sequence_list, phyche_value = GetSequenceListAndPhycheValuePsednc(
input_data, extra_phyche_index
)
from PyBioMed.PyDNA.PyDNApsenacutil import MakePsekncVector
vector = MakePsekncVector(sequence_list, lamada, w, k, phyche_value, theta_type=1)
dict_keys = ["PseDNC_%s" % i for i in range(1, len(vector[0]) + 1)]
res = dict(zip(dict_keys, vector[0]))
return res
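# Illustrative call (mirrors the __main__ examples at the bottom of this file):
#   psednc = GetPseDNC("ACCCCA", lamada=2, w=0.05)
#   # -> {"PseDNC_1": ..., "PseDNC_2": ..., ...}, one float per PseDNC component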
def GetPseKNC(input_data, **kwargs):
"""Make PseKNC dictionary.
:param input_data: file type or handle.
:param k: k-tuple.
:param extra_phyche_index: dict, the key is the dinucleotide (string),
the value is its physicochemical property value (list).
It means the user-defined physicochemical indices.
"""
if "lamada" in kwargs:
lamada = kwargs["lamada"]
else:
lamada = 1
if "w" in kwargs:
w = kwargs["w"]
else:
w = 0.5
if "k" in kwargs:
k = kwargs["k"]
else:
k = 3
if "extra_phyche_index" in kwargs:
kwargs = kwargs["extra_phyche_index"]
else:
extra_phyche_index = None
input_data = [input_data]
sequence_list, phyche_value = GetSequenceListAndPhycheValuePseknc(
input_data, extra_phyche_index
)
from PyBioMed.PyDNA.PyDNApsenacutil import MakeOldPsekncVector
vector = MakeOldPsekncVector(
sequence_list, lamada, w, k, phyche_value, theta_type=1
)
dict_keys = ["PseKNC_%s" % i for i in range(1, len(vector[0]) + 1)]
res = dict(zip(dict_keys, vector[0]))
return res
def GetPCPseDNC(input_data, **kwargs):
"""Make a PCPseDNC dictionary.
:param input_data: file object or sequence list.
:param phyche_index: physicochemical properties list.
:param all_property: choose all physicochemical properties or not.
:param extra_phyche_index: dict, the key is the dinucleotide (string),
the value is its physicochemical property value (list).
It means the user-defined physicochemical indices.
"""
if "lamada" in kwargs:
lamada = kwargs["lamada"]
else:
lamada = 1
if "w" in kwargs:
w = kwargs["w"]
else:
w = 0.05
if "k" in kwargs:
k = kwargs["k"]
else:
k = 2
if "phyche_index" in kwargs:
phyche_index = kwargs["phyche_index"]
else:
phyche_index = None
if "all_property" in kwargs:
all_property = kwargs["all_property"]
else:
all_property = False
if "extra_phyche_index" in kwargs:
extra_phyche_index = kwargs["extra_phyche_index"]
else:
extra_phyche_index = None
# Make vector.
input_data = [input_data]
sequence_list, phyche_value = GetSequenceListAndPhycheValue(
input_data, k, phyche_index, extra_phyche_index, all_property
)
from PyBioMed.PyDNA.PyDNApsenacutil import MakePsekncVector
vector = MakePsekncVector(sequence_list, lamada, w, k, phyche_value, theta_type=1)
dict_keys = ["PCPseDNC_%s" % i for i in range(1, len(vector[0]) + 1)]
res = dict(zip(dict_keys, vector[0]))
return res
def GetPCPseTNC(input_data, **kwargs):
"""Make a PCPseDNC dictionary.
:param input_data: file object or sequence list.
:param phyche_index: physicochemical properties list.
:param all_property: choose all physicochemical properties or not.
:param extra_phyche_index: dict, the key is the dinucleotide (string),
the value is its physicochemical property value (list).
It means the user-defined physicochemical indices.
"""
if "lamada" in kwargs:
lamada = kwargs["lamada"]
else:
lamada = 1
if "w" in kwargs:
w = kwargs["w"]
else:
w = 0.05
if "k" in kwargs:
k = kwargs["k"]
else:
k = 3
if "phyche_index" in kwargs:
phyche_index = kwargs["phyche_index"]
else:
phyche_index = None
if "all_property" in kwargs:
all_property = kwargs["all_property"]
else:
all_property = False
if "extra_phyche_index" in kwargs:
extra_phyche_index = kwargs["extra_phyche_index"]
else:
extra_phyche_index = None
input_data = [input_data]
sequence_list, phyche_value = GetSequenceListAndPhycheValue(
input_data, k, phyche_index, extra_phyche_index, all_property
)
# Make vector.
from PyBioMed.PyDNA.PyDNApsenacutil import MakePsekncVector
vector = MakePsekncVector(sequence_list, lamada, w, k, phyche_value, theta_type=1)
dict_keys = ["PCPseTNC_%s" % i for i in range(1, len(vector[0]) + 1)]
res = dict(zip(dict_keys, vector[0]))
return res
def GetSCPseDNC(input_data, **kwargs):
"""Make a SCPseDNC dictionary.
:param input_data: file object or sequence list.
:param phyche_index: physicochemical properties list.
:param all_property: choose all physicochemical properties or not.
:param extra_phyche_index: dict, the key is the dinucleotide (string),
the value is its physicochemical property value (list).
It means the user-defined physicochemical indices.
"""
if "lamada" in kwargs:
lamada = kwargs["lamada"]
else:
lamada = 1
if "w" in kwargs:
w = kwargs["w"]
else:
w = 0.05
if "k" in kwargs:
k = kwargs["k"]
else:
k = 2
if "phyche_index" in kwargs:
phyche_index = kwargs["phyche_index"]
else:
phyche_index = None
if "all_property" in kwargs:
all_property = kwargs["all_property"]
else:
all_property = False
if "extra_phyche_index" in kwargs:
extra_phyche_index = kwargs["extra_phyche_index"]
else:
extra_phyche_index = None
input_data = [input_data]
sequence_list, phyche_value = GetSequenceListAndPhycheValue(
input_data, k, phyche_index, extra_phyche_index, all_property
)
# Make vector.
from PyBioMed.PyDNA.PyDNApsenacutil import MakePsekncVector
vector = MakePsekncVector(sequence_list, lamada, w, k, phyche_value, theta_type=2)
dict_keys = ["SCPseDNC_%s" % i for i in range(1, len(vector[0]) + 1)]
res = dict(zip(dict_keys, vector[0]))
return res
def GetSCPseTNC(input_data, **kwargs):
"""Make a SCPseTNC dictionary.
:param input_data: file object or sequence list.
:param phyche_index: physicochemical properties list.
:param all_property: choose all physicochemical properties or not.
:param extra_phyche_index: dict, the key is the dinucleotide (string),
the value is its physicochemical property value (list).
It means the user-defined physicochemical indices.
"""
if "lamada" in kwargs:
lamada = kwargs["lamada"]
else:
lamada = 1
if "w" in kwargs:
w = kwargs["w"]
else:
w = 0.05
if "k" in kwargs:
k = kwargs["k"]
else:
k = 3
if "phyche_index" in kwargs:
phyche_index = kwargs["phyche_index"]
else:
phyche_index = None
if "all_property" in kwargs:
all_property = kwargs["all_property"]
else:
all_property = False
if "extra_phyche_index" in kwargs:
extra_phyche_index = kwargs["extra_phyche_index"]
else:
extra_phyche_index = None
input_data = [input_data]
sequence_list, phyche_value = GetSequenceListAndPhycheValue(
input_data, k, phyche_index, extra_phyche_index, all_property
)
# Make vector.
from PyBioMed.PyDNA.PyDNApsenacutil import MakePsekncVector
vector = MakePsekncVector(sequence_list, lamada, w, k, phyche_value, theta_type=2)
dict_keys = ["SCPseTNC_%s" % i for i in range(1, len(vector[0]) + 1)]
res = dict(zip(dict_keys, vector[0]))
return res
if __name__ == "__main__":
psednc = GetPseDNC("ACCCCA", lamada=2, w=0.05)
print(psednc)
PC_psednc = GetPCPseDNC(
"ACCCCA",
phyche_index=["Tilt", "Twist", "Rise", "Roll", "Shift", "Slide"],
lamada=2,
w=0.05,
)
print(PC_psednc)
pc_psetnc = GetPCPseTNC(
"ACCCCA", phyche_index=["Dnase I", "Nucleosome"], lamada=2, w=0.05
)
print(pc_psetnc)
sc_psednc = GetSCPseDNC("ACCCCCA", phyche_index=["Twist", "Tilt"], lamada=2, w=0.05)
print(sc_psednc)
sc_psetnc = GetSCPseTNC(
"ACCCCCA", phyche_index=["Dnase I", "Nucleosome"], lamada=1, w=0.05
)
print(sc_psetnc)
sc_psetnc = GetSCPseTNC(
"ACCCCA", phyche_index=["Dnase I", "Nucleosome"], lamada=2, w=0.05
)
print(sc_psetnc)
import time
from PyBioMed.PyDNA.PyDNAutil import NormalizeIndex
start_time = time.time()
phyche_index = [
[
1.019,
-0.918,
0.488,
0.567,
0.567,
-0.070,
-0.579,
0.488,
-0.654,
-2.455,
-0.070,
-0.918,
1.603,
-0.654,
0.567,
1.019,
]
]
print("Begin PseDNC")
dic = GetPseDNC("GACTGAACTGCACTTTGGTTTCATATTATTTGCTC")
print(dic)
print(len(dic))
dic = GetPseKNC("GACTGAACTGCACTTTGGTTTCATATTATTTGCTC")
print(dic)
print(len(dic))
print("PC-PseDNC")
dic = GetPCPseDNC(
"GACTGAACTGCACTTTGGTTTCATATTATTTGCTC", phyche_index=["Twist", "Tilt"]
)
print(dic)
print(len(dic))
dic = GetPCPseTNC(
"GACTGAACTGCACTTTGGTTTCATATTATTTGCTC",
lamada=1,
w=0.05,
k=2,
phyche_index=["Twist", "Tilt"],
)
print(dic)
print(len(dic))
phyche_index = [
[
7.176,
6.272,
4.736,
7.237,
3.810,
4.156,
4.156,
6.033,
3.410,
3.524,
4.445,
6.033,
1.613,
5.087,
2.169,
7.237,
3.581,
3.239,
1.668,
2.169,
6.813,
3.868,
5.440,
4.445,
3.810,
4.678,
5.440,
4.156,
2.673,
3.353,
1.668,
4.736,
4.214,
3.925,
3.353,
5.087,
2.842,
2.448,
4.678,
3.524,
3.581,
2.448,
3.868,
4.156,
3.467,
3.925,
3.239,
6.272,
2.955,
3.467,
2.673,
1.613,
1.447,
3.581,
3.810,
3.410,
1.447,
2.842,
6.813,
3.810,
2.955,
4.214,
3.581,
7.176,
]
]
from PyBioMed.PyDNA.PyDNAutil import NormalizeIndex
dic = GetPCPseTNC(
"GACTGAACTGCACTTTGGTTTCATATTATTTGCTC",
phyche_index=["Dnase I", "Nucleosome"],
extra_phyche_index=NormalizeIndex(phyche_index, is_convert_dict=True),
)
print(dic)
print(len(dic))
print("SC-PseDNC")
dic = GetSCPseDNC(
"GACTGAACTGCACTTTGGTTTCATATTATTTGCTC", phyche_index=["Twist", "Tilt"]
)
print(dic)
print(len(dic))
dic = GetSCPseDNC(
"GACTGAACTGCACTTTGGTTTCATATTATTTGCTC", all_property=True, lamada=2, w=0.05
)
print(dic)
print(len(dic))
phyche_index = [
[
1.019,
-0.918,
0.488,
0.567,
0.567,
-0.070,
-0.579,
0.488,
-0.654,
-2.455,
-0.070,
-0.918,
1.603,
-0.654,
0.567,
1.019,
]
]
from PyBioMed.PyDNA.PyDNAutil import NormalizeIndex
dic = GetSCPseDNC(
"GACTGAACTGCACTTTGGTTTCATATTATTTGCTC",
phyche_index=["Twist", "Tilt"],
extra_phyche_index=NormalizeIndex(phyche_index, is_convert_dict=True),
)
print(dic)
print(len(dic))
print()
print("SC-PseTNC")
dic = GetSCPseTNC(
"GACTGAACTGCACTTTGGTTTCATATTATTTGCTC", phyche_index=["Dnase I", "Nucleosome"]
)
print(dic)
print(len(dic))
dic = GetSCPseTNC(
"GACTGAACTGCACTTTGGTTTCATATTATTTGCTC", all_property=True, lamada=2, w=0.05
)
print(dic)
print(len(dic))
phyche_index = [
[
7.176,
6.272,
4.736,
7.237,
3.810,
4.156,
4.156,
6.033,
3.410,
3.524,
4.445,
6.033,
1.613,
5.087,
2.169,
7.237,
3.581,
3.239,
1.668,
2.169,
6.813,
3.868,
5.440,
4.445,
3.810,
4.678,
5.440,
4.156,
2.673,
3.353,
1.668,
4.736,
4.214,
3.925,
3.353,
5.087,
2.842,
2.448,
4.678,
3.524,
3.581,
2.448,
3.868,
4.156,
3.467,
3.925,
3.239,
6.272,
2.955,
3.467,
2.673,
1.613,
1.447,
3.581,
3.810,
3.410,
1.447,
2.842,
6.813,
3.810,
2.955,
4.214,
3.581,
7.176,
]
]
from PyBioMed.PyDNA.PyDNAutil import NormalizeIndex
dic = GetSCPseTNC(
"GACTGAACTGCACTTTGGTTTCATATTATTTGCTC",
phyche_index=["Dnase I", "Nucleosome"],
extra_phyche_index=NormalizeIndex(phyche_index, is_convert_dict=True),
)
print(dic)
print(len(dic))
# Normalize PseDNC index Twist, Tilt, Roll, Shift, Slide, Rise.
original_phyche_value = [
[
0.026,
0.036,
0.031,
0.033,
0.016,
0.026,
0.014,
0.031,
0.025,
0.025,
0.026,
0.036,
0.017,
0.025,
0.016,
0.026,
],
[
0.038,
0.038,
0.037,
0.036,
0.025,
0.042,
0.026,
0.037,
0.038,
0.036,
0.042,
0.038,
0.018,
0.038,
0.025,
0.038,
],
[
0.020,
0.023,
0.019,
0.022,
0.017,
0.019,
0.016,
0.019,
0.020,
0.026,
0.019,
0.023,
0.016,
0.020,
0.017,
0.020,
],
[
1.69,
1.32,
1.46,
1.03,
1.07,
1.43,
1.08,
1.46,
1.32,
1.20,
1.43,
1.32,
0.72,
1.32,
1.07,
1.69,
],
[
2.26,
3.03,
2.03,
3.83,
1.78,
1.65,
2.00,
2.03,
1.93,
2.61,
1.65,
3.03,
1.20,
1.93,
1.78,
2.26,
],
[
7.65,
8.93,
7.08,
9.07,
6.38,
8.04,
6.23,
7.08,
8.56,
9.53,
8.04,
8.93,
6.23,
8.56,
6.38,
7.65,
],
]
for e in NormalizeIndex(original_phyche_value, is_convert_dict=True).items():
print(e)
|
|
import datetime
import mongoengine as mongo
import urllib2
import redis
from django.conf import settings
from apps.social.models import MSharedStory
from apps.profile.models import Profile
from apps.statistics.rstats import RStats, round_time
from utils import json_functions as json
from utils import db_functions
from utils import log as logging
class MStatistics(mongo.Document):
key = mongo.StringField(unique=True)
value = mongo.DynamicField()
meta = {
'collection': 'statistics',
'allow_inheritance': False,
'indexes': ['key'],
}
def __unicode__(self):
return "%s: %s" % (self.key, self.value)
@classmethod
def get(cls, key, default=None):
obj = cls.objects.filter(key=key).first()
if not obj:
return default
return obj.value
@classmethod
def set(cls, key, value):
obj, _ = cls.objects.get_or_create(key=key)
obj.value = value
obj.save()
@classmethod
def all(cls):
stats = cls.objects.all()
values = dict([(stat.key, stat.value) for stat in stats])
for key, value in values.items():
if key in ('avg_time_taken', 'sites_loaded', 'stories_shared'):
values[key] = json.decode(value)
elif key in ('feeds_fetched', 'premium_users', 'standard_users', 'latest_sites_loaded',
'max_sites_loaded', 'max_stories_shared'):
values[key] = int(value)
elif key in ('latest_avg_time_taken', 'max_avg_time_taken'):
values[key] = float(value)
values['total_sites_loaded'] = sum(values['sites_loaded']) if 'sites_loaded' in values else 0
values['total_stories_shared'] = sum(values['stories_shared']) if 'stories_shared' in values else 0
return values
@classmethod
def collect_statistics(cls):
now = datetime.datetime.now()
cls.collect_statistics_premium_users()
print "Premiums: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_standard_users()
print "Standard users: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_sites_loaded()
print "Sites loaded: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_stories_shared()
print "Stories shared: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_for_db()
print "DB Stats: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_feeds_fetched()
print "Feeds Fetched: %s" % (datetime.datetime.now() - now)
@classmethod
def collect_statistics_feeds_fetched(cls):
feeds_fetched = RStats.count('feed_fetch', hours=24)
cls.objects(key='feeds_fetched').update_one(upsert=True,
set__key='feeds_fetched',
set__value=feeds_fetched)
return feeds_fetched
@classmethod
def collect_statistics_premium_users(cls):
last_day = datetime.datetime.now() - datetime.timedelta(hours=24)
premium_users = Profile.objects.filter(last_seen_on__gte=last_day, is_premium=True).count()
cls.objects(key='premium_users').update_one(upsert=True, set__key='premium_users', set__value=premium_users)
return premium_users
@classmethod
def collect_statistics_standard_users(cls):
last_day = datetime.datetime.now() - datetime.timedelta(hours=24)
standard_users = Profile.objects.filter(last_seen_on__gte=last_day, is_premium=False).count()
cls.objects(key='standard_users').update_one(upsert=True, set__key='standard_users', set__value=standard_users)
return standard_users
@classmethod
def collect_statistics_sites_loaded(cls):
now = round_time(datetime.datetime.now(), round_to=60)
sites_loaded = []
avg_time_taken = []
r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
for hour in range(24):
start_hours_ago = now - datetime.timedelta(hours=hour+1)
pipe = r.pipeline()
for m in range(60):
minute = start_hours_ago + datetime.timedelta(minutes=m)
key = "%s:%s" % (RStats.stats_type('page_load'), minute.strftime('%s'))
pipe.get("%s:s" % key)
pipe.get("%s:a" % key)
times = pipe.execute()
counts = [int(c) for c in times[::2] if c]
avgs = [float(a) for a in times[1::2] if a]
if counts and avgs:
count = sum(counts)
avg = round(sum(avgs) / count, 3)
else:
count = 0
avg = 0
sites_loaded.append(count)
avg_time_taken.append(avg)
sites_loaded.reverse()
avg_time_taken.reverse()
values = (
('sites_loaded', json.encode(sites_loaded)),
('avg_time_taken', json.encode(avg_time_taken)),
('latest_sites_loaded', sites_loaded[-1]),
('latest_avg_time_taken', avg_time_taken[-1]),
('max_sites_loaded', max(sites_loaded)),
('max_avg_time_taken', max(1, max(avg_time_taken))),
)
for key, value in values:
cls.objects(key=key).update_one(upsert=True, set__key=key, set__value=value)
@classmethod
def collect_statistics_stories_shared(cls):
now = datetime.datetime.now()
stories_shared = []
for hour in range(24):
start_hours_ago = now - datetime.timedelta(hours=hour)
end_hours_ago = now - datetime.timedelta(hours=hour+1)
shares = MSharedStory.objects.filter(
shared_date__lte=start_hours_ago,
shared_date__gte=end_hours_ago
).count()
stories_shared.append(shares)
stories_shared.reverse()
values = (
('stories_shared', json.encode(stories_shared)),
('latest_stories_shared', stories_shared[-1]),
('max_stories_shared', max(stories_shared)),
)
for key, value in values:
cls.objects(key=key).update_one(upsert=True, set__key=key, set__value=value)
@classmethod
def collect_statistics_for_db(cls):
lag = db_functions.mongo_max_replication_lag(settings.MONGODB)
cls.set('mongodb_replication_lag', lag)
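# A write-side sketch (hypothetical; not part of this module) of the per-minute
# Redis counters that collect_statistics_sites_loaded() reads above: a ":s" key
# holds the number of page loads in a minute bucket and an ":a" key holds the
# accumulated load time, so summing ":a" values and dividing by the summed ":s"
# counts reproduces the average the reader computes. The key prefix and expiry
# are assumptions; the real keys are derived from RStats.stats_type('page_load').
def _record_page_load_sketch(r, duration, stats_prefix="page_load"):
    import time
    minute = int(time.time()) // 60 * 60
    key = "%s:%s" % (stats_prefix, minute)
    pipe = r.pipeline()
    pipe.incr("%s:s" % key)                    # page loads in this minute bucket
    pipe.incrbyfloat("%s:a" % key, duration)   # total seconds spent in this bucket
    pipe.expire("%s:s" % key, 60 * 60 * 25)    # keep a little over 24 hours
    pipe.expire("%s:a" % key, 60 * 60 * 25)
    pipe.execute()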
class MFeedback(mongo.Document):
date = mongo.StringField()
summary = mongo.StringField()
subject = mongo.StringField()
url = mongo.StringField()
style = mongo.StringField()
order = mongo.IntField()
meta = {
'collection': 'feedback',
'allow_inheritance': False,
'indexes': ['style'],
'ordering': ['order'],
}
def __unicode__(self):
return "%s: (%s) %s" % (self.style, self.date, self.subject)
@classmethod
def collect_feedback(cls):
data = urllib2.urlopen('https://getsatisfaction.com/newsblur/topics.widget').read()
data = json.decode(data[1:-1])
i = 0
if len(data):
cls.objects.delete()
for feedback in data:
feedback['order'] = i
i += 1
for removal in ['about', 'less than']:
if removal in feedback['date']:
feedback['date'] = feedback['date'].replace(removal, '')
for feedback in data:
# Convert unicode to strings.
fb = dict([(str(k), v) for k, v in feedback.items()])
cls.objects.create(**fb)
@classmethod
def all(cls):
feedbacks = cls.objects.all()[:4]
return feedbacks
class MAnalyticsFetcher(mongo.Document):
date = mongo.DateTimeField(default=datetime.datetime.now)
feed_id = mongo.IntField()
feed_fetch = mongo.FloatField()
feed_process = mongo.FloatField()
page = mongo.FloatField()
icon = mongo.FloatField()
total = mongo.FloatField()
server = mongo.StringField()
feed_code = mongo.IntField()
meta = {
'db_alias': 'nbanalytics',
'collection': 'feed_fetches',
'allow_inheritance': False,
'indexes': ['date', 'feed_id', 'server', 'feed_code'],
'ordering': ['date'],
}
def __unicode__(self):
return "%s: %.4s+%.4s+%.4s+%.4s = %.4ss" % (self.feed_id, self.feed_fetch,
self.feed_process,
self.page,
self.icon,
self.total)
@classmethod
def add(cls, feed_id, feed_fetch, feed_process,
page, icon, total, feed_code):
server_name = settings.SERVER_NAME
if 'app' in server_name: return
if icon and page:
icon -= page
if page and feed_process:
page -= feed_process
elif page and feed_fetch:
page -= feed_fetch
if feed_process and feed_fetch:
feed_process -= feed_fetch
cls.objects.create(feed_id=feed_id, feed_fetch=feed_fetch,
feed_process=feed_process,
page=page, icon=icon, total=total,
server=server_name, feed_code=feed_code)
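# Illustrative arithmetic for the subtraction above: if the incoming values are
# cumulative timings feed_fetch=0.2, feed_process=0.5, page=0.8, icon=1.0
# (seconds), the stored per-phase durations become 0.2, 0.3, 0.3 and 0.2.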
@classmethod
def calculate_stats(cls, stats):
return cls.aggregate(**stats)
@classmethod
def clean(cls, days=1):
last_day = datetime.datetime.now() - datetime.timedelta(days=days)
from utils.feed_functions import timelimit, TimeoutError
@timelimit(60)
def delete_old_history():
cls.objects(date__lte=last_day).delete()
try:
delete_old_history()
except TimeoutError:
logging.debug("~SK~SB~BR~FWTimed out on deleting old fetch history. Shit.")
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._bindings_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BindingsOperations:
"""BindingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2020_11_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
**kwargs: Any
) -> "_models.BindingResource":
"""Get a Binding and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param binding_name: The name of the Binding resource.
:type binding_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BindingResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2020_11_01_preview.models.BindingResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: "_models.BindingResource",
**kwargs: Any
) -> "_models.BindingResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(binding_resource, 'BindingResource')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BindingResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BindingResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: "_models.BindingResource",
**kwargs: Any
) -> AsyncLROPoller["_models.BindingResource"]:
"""Create a new Binding or update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param binding_name: The name of the Binding resource.
:type binding_name: str
:param binding_resource: Parameters for the create or update operation.
:type binding_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.BindingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.BindingResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
binding_resource=binding_resource,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
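# Illustrative caller-side pattern (client wiring is hypothetical): the
# "begin_*" coroutines above return an AsyncLROPoller, which is typically
# awaited and then resolved with .result(), e.g.
#   poller = await client.bindings.begin_create_or_update(
#       resource_group_name, service_name, app_name, binding_name, binding_resource)
#   binding = await poller.result()  # polls until the service reports completion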
async def _delete_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Operation to delete a Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param binding_name: The name of the Binding resource.
:type binding_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
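    # Illustrative usage sketch (not part of the generated operations class): deleting a
    # binding through the async management client. The client and credential names below
    # (azure.identity.aio.DefaultAzureCredential, the aio AppPlatformManagementClient) are
    # assumptions about the surrounding packages, so treat this as a hedged example only.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.appplatform.v2020_11_01_preview.aio import AppPlatformManagementClient
    #
    #   async with AppPlatformManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #       poller = await client.bindings.begin_delete(
    #           "<resource-group>", "<service>", "<app>", "<binding>")
    #       await poller.result()  # completes once the long-running delete finishes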
async def _update_initial(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: "_models.BindingResource",
**kwargs: Any
) -> "_models.BindingResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(binding_resource, 'BindingResource')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BindingResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
service_name: str,
app_name: str,
binding_name: str,
binding_resource: "_models.BindingResource",
**kwargs: Any
) -> AsyncLROPoller["_models.BindingResource"]:
"""Operation to update an exiting Binding.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:param binding_name: The name of the Binding resource.
:type binding_name: str
:param binding_resource: Parameters for the update operation.
:type binding_resource: ~azure.mgmt.appplatform.v2020_11_01_preview.models.BindingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BindingResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2020_11_01_preview.models.BindingResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
binding_name=binding_name,
binding_resource=binding_resource,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('BindingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings/{bindingName}'} # type: ignore
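    # Illustrative usage sketch (same assumed client wiring as above): updating an existing
    # binding. Which BindingResourceProperties fields apply depends on the bound service;
    # the field names shown here are placeholders, not a definitive schema.
    #
    #   props = _models.BindingResourceProperties(key="<key>", binding_parameters={"<param>": "<value>"})
    #   poller = await client.bindings.begin_update(
    #       "<resource-group>", "<service>", "<app>", "<binding>",
    #       _models.BindingResource(properties=props))
    #   updated = await poller.result()  # BindingResource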
@distributed_trace
def list(
self,
resource_group_name: str,
service_name: str,
app_name: str,
**kwargs: Any
) -> AsyncIterable["_models.BindingResourceCollection"]:
"""Handles requests to list all resources in an App.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param app_name: The name of the App resource.
:type app_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either BindingResourceCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2020_11_01_preview.models.BindingResourceCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BindingResourceCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
app_name=app_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("BindingResourceCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/apps/{appName}/bindings'} # type: ignore
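    # Illustrative usage sketch (same assumed client wiring as above): list() returns an
    # AsyncItemPaged, so it is consumed with ``async for``; each item is a BindingResource.
    #
    #   async for binding in client.bindings.list("<resource-group>", "<service>", "<app>"):
    #       print(binding.name)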
|
|
from sklearn.datasets import base as bunch
from learner.strategy import Joint, Sequential
import numpy as np
from nltk import RegexpTokenizer
from nltk.stem import PorterStemmer
def sample_data(data, train_idx, test_idx):
sample = bunch.Bunch(train=bunch.Bunch(), test=bunch.Bunch())
if len(test_idx) > 0: #if there are test indexes
sample.train.data = np.array(data.data, dtype=object)[train_idx]
sample.test.data = np.array(data.data, dtype=object)[test_idx]
sample.train.target = data.target[train_idx]
sample.test.target = data.target[test_idx]
sample.train.bow = data.bow[train_idx]
sample.test.bow = data.bow[test_idx]
sample.target_names = data.target_names
sample.train.remaining = []
else:
## Just shuffle the data
sample = data
data_lst = np.array(data.train.data, dtype=object)
data_lst = data_lst[train_idx]
sample.train.data = data_lst
sample.train.target = data.train.target[train_idx]
sample.train.bow = data.train.bow[train_idx]
sample.train.remaining = []
return sample.train, sample.test
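# Minimal sketch of the Bunch layout sample_data expects; the documents, targets and
# feature matrix below are made up purely for illustration.
def _example_sample_data():
    data = bunch.Bunch(
        data=['doc one', 'doc two', 'doc three', 'doc four'],
        target=np.array([0, 1, 0, 1]),
        bow=np.arange(8).reshape(4, 2),  # stands in for a vectorized feature matrix
        target_names=['neg', 'pos'],
    )
    train, test = sample_data(data, train_idx=[0, 1, 2], test_idx=[3])
    return train, test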
def stemming(doc):
    stemmer = PorterStemmer()
    tokenizer = RegexpTokenizer(r'\b\w+\b')
    return [stemmer.stem(t) for t in tokenizer.tokenize(doc)]
def get_vectorizer(config):
limit = config['limit']
vectorizer = config['vectorizer']
min_size = config['min_size']
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
if vectorizer == 'tfidf':
return TfidfVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=False, ngram_range=(1, 1))
elif vectorizer == "tfidfvocab":
vocab = open(config['vocabulary']).readlines()
vocab = [v.strip() for v in vocab]
return TfidfVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=False, ngram_range=(1, 1),
vocabulary=vocab)
elif vectorizer == 'bow':
from datautils import StemTokenizer
return CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 3),
token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
else:
return None
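# Hedged example of the config dict get_vectorizer reads; 'limit' and 'min_size' are
# looked up but not used for the tf-idf case.
def _example_get_vectorizer():
    config = {'vectorizer': 'tfidf', 'limit': None, 'min_size': None}
    return get_vectorizer(config)  # -> TfidfVectorizer ready for fit_transform(corpus)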
def get_classifier(cl_name, **kwargs):
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
clf = None
if cl_name is not None:
if cl_name in "mnb":
alpha = 1
if 'parameter' in kwargs:
alpha = kwargs['parameter']
clf = MultinomialNB(alpha=alpha)
elif cl_name == "lr" or cl_name == "lrl1":
c = 1
if 'parameter' in kwargs:
c = kwargs['parameter']
clf = LogisticRegression(penalty="l1", C=c)
elif cl_name == "lrl2":
c = 1
if 'parameter' in kwargs:
c = kwargs['parameter']
clf = LogisticRegression(penalty="l2", C=c)
else:
raise ValueError("We need a classifier name for the student [lr|mnb]")
return clf
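# Quick sketch of the classifier factory; the alpha/C values are arbitrary examples.
def _example_get_classifier():
    nb = get_classifier('mnb', parameter=0.1)    # MultinomialNB(alpha=0.1)
    lr = get_classifier('lrl2', parameter=2.0)   # LogisticRegression(penalty='l2', C=2.0)
    return nb, lr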
def get_learner(learn_config, vct=None, sent_tk=None, seed=None, cost_model=None):
from learner.base import Learner
cl_name = learn_config['model']
clf = get_classifier(cl_name, parameter=learn_config['parameter'])
learner = Learner(clf)
if learn_config['type'] == 'joint':
learner = Joint(clf, snippet_fn=None, utility_fn=None, seed=seed)
elif learn_config['type'] == 'sequential':
learner = Sequential(clf, snippet_fn=None, utility_fn=None, seed=seed)
elif learn_config['type'] == 'sequential_single':
from learner.single_student import SequentialSingleStudent
learner = SequentialSingleStudent(clf, snippet_fn=None, utility_fn=None, seed=seed)
elif learn_config['type'] == 'joint_single':
from learner.single_student import JointSingleStudent
learner = JointSingleStudent(clf, snippet_fn=None, utility_fn=None, seed=seed)
else:
raise ValueError("We don't know {} leaner".format(learn_config['type']))
learner.set_utility(learn_config['utility'])
learner.set_snippet_utility(learn_config['snippet'])
learner.set_sent_tokenizer(sent_tk)
learner.set_calibration_method(learn_config['calibration'])
learner.set_vct(vct)
learner.set_cost_model(cost_model)
learner.set_cost_fn(get_costfn(learn_config['cost_function']))
return learner
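# Example shape of the learn_config dict as read by get_learner; every value below is a
# placeholder. Valid choices for 'utility', 'snippet' and 'calibration' come from the
# learner package and are not documented in this file.
#
#   learn_config = {
#       'model': 'lrl2',
#       'parameter': 1.0,
#       'type': 'sequential',        # joint | sequential | sequential_single | joint_single
#       'utility': '<utility-name>',
#       'snippet': '<snippet-utility-name>',
#       'calibration': '<calibration-method>',
#       'cost_function': 'unit',     # unit | variable_cost (see get_costfn)
#   }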
def get_expert(config, size=None):
from expert.experts import PredictingExpert, SentenceExpert, \
TrueExpert, NoisyExpert, ReluctantSentenceExpert, ReluctantDocumentExpert, \
PerfectReluctantDocumentExpert, TrueReluctantExpert
from expert.noisy_expert import NoisyReluctantDocumentExpert
cl_name = config['model']
clf = get_classifier(cl_name, parameter=config['parameter'])
if config['type'] == 'true':
expert = TrueExpert(None)
elif config['type'] == 'pred':
expert = PredictingExpert(clf)
elif config['type'] == 'sent':
tk = get_tokenizer(config['sent_tokenizer'])
expert = SentenceExpert(clf, tokenizer=tk)
elif config['type'] == 'noisy':
p = config['noise_p']
expert = NoisyExpert(None, p)
elif config['type'] == 'neutral':
p = config['threshold']
tk = get_tokenizer(config['sent_tokenizer'])
expert = ReluctantSentenceExpert(clf, p, tokenizer=tk)
    elif config['type'] in ('docneutral', 'noisyreluctant'):
p = config['threshold']
expert = ReluctantDocumentExpert(clf, p)
elif config['type'] == 'perfectreluctant': # reluctant based on unc threshold
p = config['threshold']
expert = PerfectReluctantDocumentExpert(clf, p)
elif config['type'] == 'noisyreluctantscale': # reluctant based on unc threshold, noisy based on CE
p = config['threshold']
args = {'factor': config['scale'], 'data_size': size}
expert = NoisyReluctantDocumentExpert(clf, p, **args)
elif config['type'] == 'truereluctant': # reluctant based on p probability
p = config['neutral_p']
expert = TrueReluctantExpert(None, p)
    elif config['type'] == 'amtexpert':
from expert.amt_expert import AMTExpert
expert = AMTExpert(None)
else:
raise Exception("We don't know {} expert".format(config['type']))
return expert
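# Example shape of the expert config as read by get_expert; extra keys depend on 'type'
# (e.g. 'threshold' for the reluctant experts, 'noise_p' for 'noisy', 'neutral_p' for
# 'truereluctant', 'scale' for 'noisyreluctantscale', 'sent_tokenizer' for sentence experts).
#
#   expert_config = {'type': 'true', 'model': 'mnb', 'parameter': 1.0}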
def get_bootstrap(config):
bt = config['bootstrap']
if 'bootstrap_type' in config:
mt = config['bootstrap_type']
else:
mt = None
return bt, mt
def get_tokenizer(tk_name, **kwargs):
if tk_name == 'nltk':
import nltk
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
return sent_detector
elif tk_name == 'twits' or tk_name == 'tweets':
from twit_token import TwitterSentenceTokenizer
sent_detector = TwitterSentenceTokenizer()
return sent_detector
elif tk_name == 'amt-sent' or tk_name == 'amt':
from amt_tokenizer import AMTSentenceTokenizer
sent_detector = AMTSentenceTokenizer()
return sent_detector
elif tk_name == 'snippet':
from snippet_tokenizer import SnippetTokenizer
k = (1,1)
if 'snip_size' in kwargs:
k = kwargs['snip_size']
sent_detector = SnippetTokenizer(k=k)
return sent_detector
elif tk_name == 'first1snippet':
from snippet_tokenizer import First1SnippetTokenizer
k = (1,1)
if 'snip_size' in kwargs:
k = kwargs['snip_size']
sent_detector = First1SnippetTokenizer(k=k)
return sent_detector
elif tk_name == 'random1snippet':
from snippet_tokenizer import Random1SnippetTokenizer
k = (1,1)
if 'snip_size' in kwargs:
k = kwargs['snip_size']
sent_detector = Random1SnippetTokenizer(k=k)
return sent_detector
elif tk_name == 'windowsnippet':
from snippet_tokenizer import WindowSnippetTokenizer
k = (1,1)
if 'snip_size' in kwargs:
k = kwargs['snip_size']
sent_detector = WindowSnippetTokenizer(k=k)
return sent_detector
elif tk_name == 'firstksnippet':
from snippet_tokenizer import FirstWindowSnippetTokenizer
k = (1,1)
if 'snip_size' in kwargs:
k = kwargs['snip_size']
sent_detector = FirstWindowSnippetTokenizer(k=k)
return sent_detector
else:
raise Exception("Unknown sentence tokenizer")
def get_costfn(fn_name):
from costutils import intra_cost, unit_cost
if fn_name == 'unit':
return unit_cost
elif fn_name == 'variable_cost':
return intra_cost
else:
raise Exception("Unknown cost function")
# def unit_cost(X):
# return X.shape[0]
def print_file(cost, mean, std, f):
# f = open(file_name, "w")
f.write("COST\tMEAN\tSTDEV\n")
for a, b, c in zip(cost, mean, std):
f.write("{0:.3f}\t{1:.3f}\t{2:.3f}\n".format(a, b, c))
f.close()
def print_cm_file(cost, mean, std, f):
    # f = open(file_name, "w")
    f.write("COST\tT0\tF1\tF0\tT1\tSTDEV\n")
    for a, b, c in zip(cost, mean, std):
        # each row: cost, the four confusion-matrix entries (T0, F1, F0, T1), and the stdev
        f.write("{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}\t{4:.3f}\t{5:.3f}\n".format(a, b[0], b[1], b[2], b[3], c))
    f.close()
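# Minimal usage sketch for the report writer above; the file name is arbitrary and
# print_file closes the handle itself.
def _example_print_file(path='learning_curve.txt'):
    cost = [10.0, 20.0, 30.0]
    mean = [0.61, 0.68, 0.72]
    std = [0.02, 0.015, 0.01]
    print_file(cost, mean, std, open(path, 'w'))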
|
|
import datetime
import logging
import os
import time
import hashlib
import json
import random
import requests
import signal
import socket
import subprocess
import sys
import traceback
from datetime import timedelta
import tarfile
import copy
import shutil
import tempfile
import io
import locale
import pymongo
import argparse
#-------------------------------------------
#This part (class RucioAPI) is copied from a RucioAPI class which was under development by Boris Bauermeister
#at the time of writing (07/07/18). Most likely this development will not be finished and the code will move into
#future XENONnT developments for Rucio in XENONnT. For any question ever: [email protected]
class RucioAPI():
def __init__(self):
self.vlevel = 1
def SetAccount(self, account):
self.account=account
def SetHost(self, host):
self.host = host
def LoadProxy(self, path_to_proxy=None):
        if path_to_proxy is None:
print("Add the path to your proxy ticket")
return 0
else:
self.path_proxy = path_to_proxy
def GetConfig(self):
varStash = """
#Source Python2.7 and rucio
module load python/2.7
source /cvmfs/xenon.opensciencegrid.org/software/rucio-py27/setup_rucio_1_8_3.sh
#Configure the rucio environment
export RUCIO_HOME=/cvmfs/xenon.opensciencegrid.org/software/rucio-py27/1.8.3/rucio
export RUCIO_ACCOUNT={rucio_account}
#Set location of the proxy:
export X509_USER_PROXY={x509_user_proxy}
"""
varXe1t = """
export PATH=/home/xe1ttransfer/.local/bin:$PATH
export RUCIO_HOME=~/.local/rucio
export RUCIO_ACCOUNT={rucio_account}
#Set location of the proxy:
export X509_USER_PROXY={x509_user_proxy}
"""
varMidway = """
source /cvmfs/xenon.opensciencegrid.org/software/rucio-py26/setup_rucio_1_8_3.sh
export RUCIO_HOME=/cvmfs/xenon.opensciencegrid.org/software/rucio-py26/1.8.3/rucio/
export RUCIO_ACCOUNT={rucio_account}
export X509_USER_PROXY={x509_user_proxy}
"""
varMidway2 = """
source /cvmfs/xenon.opensciencegrid.org/software/rucio-py27/setup_rucio_1_8_3.sh
export RUCIO_HOME=/cvmfs/xenon.opensciencegrid.org/software/rucio-py27/1.8.3/rucio/
source /cvmfs/oasis.opensciencegrid.org/osg-software/osg-wn-client/3.3/current/el7-x86_64/setup.sh
export RUCIO_ACCOUNT={rucio_account}
export X509_USER_PROXY={x509_user_proxy}
"""
varDummy = """
echo "Rucio configuration is missing"
export RUCIO_ACCOUNT={rucio_account}
export X509_USER_PROXY={x509_user_proxy}
"""
if self.host=="xe1t-datamanager":
return varXe1t
elif self.host=="login":
return varStash
elif self.host=="midway":
return varMidway
elif self.host=="midway2":
return varMidway2
else:
return varDummy
def ConfigHost(self):
self.config = self.GetConfig().format(rucio_account=self.account, x509_user_proxy=self.path_proxy)
def create_script(self, script):
"""Create script as temp file to be run on cluster"""
fileobj = tempfile.NamedTemporaryFile(delete=False,
suffix='.sh',
mode='wt',
buffering=1)
fileobj.write(script)
os.chmod(fileobj.name, 0o774)
return fileobj
def delete_script(self, fileobj):
"""Delete script after submitting to cluster
:param script_path: path to the script to be removed
"""
fileobj.close()
def doRucio(self, upload_string ):
sc = self.create_script( upload_string )
execute = subprocess.Popen( ['sh', sc.name] ,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=False,
universal_newlines=False)
stdout_value, stderr_value = execute.communicate()
stdout_value = stdout_value.decode("utf-8")
stdout_value = stdout_value.split("\n")
stdout_value = list(filter(None, stdout_value)) # fastest way to remove '' from list
self.delete_script(sc)
return stdout_value, stderr_value
def Whoami(self):
cmd_adj = "rucio whoami"
cmd = self.config
cmd += cmd_adj
msg, err = self.doRucio(cmd)
for i in msg:
print(i)
def Download(self, scope=None, dname=None, destination=None):
cmd_adj = "rucio download --dir {destination} --no-subdir {scope}:{dname}".format(destination=destination,
scope=scope,
dname=dname)
cmd = self.config
cmd += cmd_adj
msg, err = self.doRucio(cmd)
for i in msg:
print(i)
#Additional functions:
def get_size(start_path = '.'):
#https://stackoverflow.com/questions/1392413/calculating-a-directorys-size-using-python
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def getLEDCalibration(rc_days = 1):
#This function interacts with the XENON1T runDB:
uri = 'mongodb://eb:%[email protected]:27017,copslx50.fysik.su.se:27017,zenigata.uchicago.edu:27017/run'
uri = uri % os.environ.get('MONGO_PASSWORD')
c = pymongo.MongoClient(uri,
replicaSet='runs',
readPreference='secondaryPreferred')
db = c['run']
collection = db['runs_new']
#Create a query of the recent days (rc_days)
# -for LED data
dt_today = datetime.datetime.today()
dt_recent = timedelta(days=rc_days)
dt_begin = dt_today-dt_recent
tags = ['gain_step%d' % i for i in range(5)]
query = {"source.type": "LED",
"start": {'$gt': dt_begin},
}
cursor = collection.find(query)
cursor = list(cursor)
#get rucio:
safer = {}
for i_c in cursor:
run_number = i_c['number']
run_name = i_c['name']
run_date = i_c['start']
run_source = None
run_dbtags = []
if 'tags' in i_c and len(i_c['tags']) > 0:
for itag in i_c['tags']:
run_dbtags.append(itag['name'])
#create a list of remaining led tags which are allowed
remaining_led_tags = [i for i in tags if i in run_dbtags]
if len(remaining_led_tags) <= 0:
continue
if 'source' in i_c:
run_source = i_c['source']['type']
#print(run_source, run_number, run_date)
i_data = None
if 'data' in i_c:
i_data = i_c['data']
else:
continue
rucio_safe = {}
rucio_safe['rucio_rse'] = None
rucio_safe['rucio_rule'] = None
rucio_safe['rucio_location'] = None
rucio_safe['tag'] = run_dbtags
rucio_safe['size'] = i_c['raw_size_byte']
for i_d in i_data:
if i_d['host'] != 'rucio-catalogue':
continue
if i_d['status'] != 'transferred':
continue
rucio_safe['rucio_rse'] = i_d['rse']
rucio_safe['rucio_rule'] = i_d['rule_info']
rucio_safe['rucio_location'] = i_d['location']
safer[run_name]=rucio_safe
return safer
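# The dictionary returned by getLEDCalibration() maps run names to the Rucio bookkeeping
# gathered above, roughly (placeholders, not real values):
#
#   {'<run_name>': {'rucio_rse': '<rse>',
#                   'rucio_rule': <rule_info from the runDB>,
#                   'rucio_location': '<scope>:<dataset>',
#                   'tag': ['gain_step0', ...],
#                   'size': <raw_size_byte>}}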
#Main:
def led_purge(led_store=None, purge=-1):
#check for input path first:
    if led_store is None:
return False
if purge == -1:
return False
#1) Check for folders in the calibration dir:
dt_today = datetime.datetime.today()
dt_recent = timedelta(days=purge)
dt_begin = dt_today-dt_recent
#Grab only folders which are not hidden and follow the pmt raw data pattern:
level1 = [f for f in os.listdir(led_store) if (not f.startswith('.') and f.startswith('led_raw_data_'))]
purge_level = []
print("Remove folders which are older then {d} days".format(d=purge))
for il in level1:
        #crude check to get a valid date and avoid removing too much from the directory
if 'led_raw_data_' not in il:
continue
if len(il.split("_")[3]) != 6:
continue
if 'PMTGainCalibration' == il or 'make_hist' == il or 'gain_calculation' == il:
continue
date_ext = il.split("_")[3]
date_ext = datetime.datetime.strptime(date_ext, '%y%m%d')
rmfolder = os.path.join(led_store, il)
if date_ext < dt_begin:
#shutil.rmtree(rmfolder)
print(" <> folder", rmfolder)
purge_level.append(rmfolder)
else:
print(" KEEP! -> {f}".format(f=rmfolder))
return purge_level
def led_download(led_store=None, get=1):
#Loading the Rucio part:
_account = "xenon-analysis"
_host = "midway2"
_certpro = "/project/lgrandi/xenon1t/grid_proxy/xenon_service_proxy"
print(" <> Load Rucio")
print(" - User {user}".format(user=_account))
print(" - Host config {hc}".format(hc=_host))
rc = RucioAPI()
rc.SetAccount(_account)
rc.SetHost(_host)
rc.LoadProxy(_certpro)
rc.ConfigHost()
print(" <Rucio loaded>")
#check for input path first:
    if led_store is None:
return False
#Define some standard paths for LED downloads:
if led_store[-1] != "/":
led_store+="/"
led_dir = "{led_store}led_raw_data_{date}".format(led_store=led_store, date="{date}")
#Get all DB entries about LED files:
led_call = getLEDCalibration(get)
#Analyse the runDB entries before going to the download section
download_success = {}
for key, val in led_call.items():
cal_day = key.split("_")[0]
cal_time= key.split("_")[1]
print("Check paths for {k}".format(k=key))
#check first if date folder exists and create if necessary:
path_to_check = led_dir.format(date=cal_day)
if not os.path.isdir(path_to_check):
os.makedirs(path_to_check)
#check for the subfolders:
path_to_check_sub = os.path.join(path_to_check, "{date}_{time}".format(date=cal_day, time=cal_time) )
if not os.path.isdir(path_to_check_sub):
os.makedirs(path_to_check_sub)
f_size = int(get_size(path_to_check_sub))
f_size_db = int(val['size'])
if f_size == f_size_db:
download_success["{date}_{time}".format(date=cal_day, time=cal_time)] = "Available"
continue
#Download:
print("Start download: {k}".format(k=key))
rc_loc = led_call[ "{date}_{time}".format(date=cal_day, time=cal_time)]['rucio_location']
        if rc_loc is None:
continue
rc_scope = rc_loc.split(":")[0]
rc_dname = rc_loc.split(":")[1]
rc.Download(scope=rc_scope, dname=rc_dname, destination=path_to_check_sub)
print(" Downloaded to: ", path_to_check_sub)
#check for success by database and folder comparison:
f_size = int(get_size(path_to_check_sub))
f_size_db = int(val['size'])
if f_size == f_size_db:
download_success["{date}_{time}".format(date=cal_day, time=cal_time)] = True
print("Download success {k}".format(k=download_success))
return download_success
def led_keeper():
parser = argparse.ArgumentParser(description="Submit ruciax tasks to batch queue.")
parser.add_argument('--get', type=int,
help="Get LED data of he past N days (--get <N>) with default N=1",
default=0)
parser.add_argument('--purge', type=int, default=-1,
help="Purge LED data of he past N days (--get <N>) with default N=-1")
args = parser.parse_args()
_get = args.get
_purge = args.purge
#basic path for led calibration:
led_store = "/project/lgrandi/pmt_calibration/PMTGainCalibration/"
##Get all DB entries about LED files:
led_call = getLEDCalibration(_get)
#get a list of dates:
dates = []
for i, j in led_call.items():
date = i.split("_")[0]
if date not in dates:
dates.append(date)
#Do downloads if necessary:
if int(_get) > 0:
print("Download to path {path}".format(path=led_store))
dw_status = led_download(led_store, _get)
for dwS_key, dwS_val in dw_status.items():
print(dwS_key, dwS_val)
    #Delete folders which are older than N days:
if int(_purge) > -1:
print("Purge LED calibration data oder than {purge} days:".format(purge=_purge))
dw_purge = led_purge(led_store, _purge)
for ifolders in dw_purge:
print("purged:", ifolders)
if __name__ == '__main__':
led_keeper()
|
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
# pylint: skip-file
from __future__ import absolute_import, division, print_function
import collections
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
import math
from salt.ext.tornado.concurrent import TracebackFuture, is_future
from salt.ext.tornado.log import app_log, gen_log
from salt.ext.tornado.platform.auto import set_close_exec, Waker
from salt.ext.tornado import stack_context
from salt.ext.tornado.util import PY3, Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
if PY3:
import _thread as thread
else:
import thread
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server:
.. testcode::
import errno
import functools
import tornado.ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
if __name__ == '__main__':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = tornado.ioloop.IOLoop.current()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
.. testoutput::
:hide:
By default, a newly-constructed `IOLoop` becomes the thread's current
`IOLoop`, unless there already is a current `IOLoop`. This behavior
can be controlled with the ``make_current`` argument to the `IOLoop`
constructor: if ``make_current=True``, the new `IOLoop` will always
try to become current and it raises an error if there is already a
current instance. If ``make_current=False``, the new `IOLoop` will
not try to become current.
.. versionchanged:: 4.2
Added the ``make_current`` keyword argument to the `IOLoop`
constructor.
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. In most other cases, it is better to use `current()`
to get the current thread's `IOLoop`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
When using an `IOLoop` subclass, `install` must be called prior
to creating any objects that implicitly create their own
`IOLoop` (e.g., :class:`tornado.httpclient.AsyncHTTPClient`).
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to
`IOLoop.instance()`.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from salt.ext.tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from salt.ext.tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from salt.ext.tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self, make_current=None):
if make_current is None:
if IOLoop.current(instance=False) is None:
self.make_current()
elif make_current:
if IOLoop.current(instance=False) is not None:
raise RuntimeError("current IOLoop already exists")
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
           ``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
``s`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
``s`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
The function must return either a yieldable object or
``None``. If the function returns a yieldable object, the
`IOLoop` will run until the yieldable is resolved (and
`run_sync()` will return the yieldable's result). If it raises
an exception, the `IOLoop` will stop and the exception will be
re-raised to the caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.current().run_sync(main)
.. versionchanged:: 4.3
Returning a non-``None``, non-yieldable value is now an error.
"""
future_cell = [None]
def run():
try:
result = func()
if result is not None:
from salt.ext.tornado.gen import convert_yielded
result = convert_yielded(result)
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
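    # Illustrative only: schedule work three seconds from now and cancel it again.
    # ``do_work`` is a stand-in callback; both forms resolve to call_at internally.
    #
    #   loop = IOLoop.current()
    #   handle = loop.call_later(3, do_work)                   # relative delay
    #   handle = loop.add_timeout(loop.time() + 3, do_work)    # absolute deadline
    #   loop.remove_timeout(handle)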
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None:
#from salt.ext.tornado import gen
import salt.ext.tornado.gen
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
try:
ret = salt.ext.tornado.gen.convert_yielded(ret)
except salt.ext.tornado.gen.BadYieldError:
# It's not unusual for add_callback to be used with
# methods returning a non-None and non-yieldable
# result, which should just be ignored.
pass
else:
self.add_future(ret, self._discard_future_result)
except Exception:
self.handle_callback_exception(callback)
def _discard_future_result(self, future):
"""Avoid unhandled-exception warnings from spawned coroutines."""
future.result()
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
        implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None, **kwargs):
super(PollIOLoop, self).initialize(**kwargs)
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = collections.deque()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in list(self._handlers.values()):
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError:
# Non-main thread, or the previous value of wakeup_fd
# is no longer valid.
old_wakeup_fd = None
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
ncallbacks = len(self._callbacks)
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512 and
self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for i in range(ncallbacks):
self._run_callback(self._callbacks.popleft())
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that modify self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
if self._closing:
return
# Blindly insert into self._callbacks. This is safe even
# from signal handlers because deque.append is atomic.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if thread.get_ident() != self._thread_ident:
# This will write one byte but Waker.consume() reads many
# at once, so it's ok to write even when not strictly
# necessary.
self._waker.wake()
else:
# If we're on the IOLoop's thread, we don't need to wake anyone.
pass
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tdeadline']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tdeadline = (deadline, next(io_loop._timeout_counter))
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return self.tdeadline < other.tdeadline
def __le__(self, other):
return self.tdeadline <= other.tdeadline
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
Note that the timeout is given in milliseconds, while most other
time-related functions in Tornado use seconds.
If the callback runs for longer than ``callback_time`` milliseconds,
subsequent invocations will be skipped to get back on schedule.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
if self._next_timeout <= current_time:
callback_time_sec = self.callback_time / 1000.0
self._next_timeout += (math.floor((current_time - self._next_timeout) /
callback_time_sec) + 1) * callback_time_sec
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
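# Illustrative sketch (not part of the original module): minimal usage of
# PeriodicCallback as described in its docstring -- callback_time is given in
# milliseconds and start() must be called explicitly. Assumes an IOLoop is
# available on the current thread.
def _example_periodic_callback():
    counter = {'ticks': 0}

    def tick():
        counter['ticks'] += 1
        if counter['ticks'] >= 5:
            periodic.stop()
            IOLoop.current().stop()

    periodic = PeriodicCallback(tick, 500)  # fire every 500 ms
    periodic.start()
    IOLoop.current().start()  # blocks until tick() stops the loop
    return counter['ticks']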
|
|
#!/usr/bin/env python
# A robot morphology (currently a snake and a phantomx morphology) is set out to learn locomotion
# patterns according to the reward function. The reward function can be, for example, moving along a
# certain axis or reaching a certain speed along it. The morphologies can be controlled using position,
# velocity or torque control. The environment may be split into multiple environments, perhaps one per
# morphology.
import gym
from gym import spaces
import numpy as np
import pybullet as p
import sys
import time
from RewardFunction import RewardFunction
from VelocityHelper import VelocityHelper
np.set_printoptions(precision=3, suppress=True, linewidth=10000)
def add_opts(parser):
# add some parser arguments such as the ones below
parser.add_argument('--delay', type=float, default=0.0)
parser.add_argument('--action-gain', type=float, default=-1,
help="magnitude of action gain applied per step")
    parser.add_argument('--action-force', type=float, default=1.25,
                        help="magnitude of action force (in Newtons) applied per step")
parser.add_argument('--gravity-force', type=float, default=-9.81,
help="amount of gravity")
parser.add_argument('--control-type', type=str, default='position-control',
help="the type of control to move the morphology (position-control, velocity-control, torque-control)")
parser.add_argument('--morphology-type', type=int, default=3,
help="Type of morphology; 1 = snake/2 = springy snake/3 = phantomx")
parser.add_argument('--action-repeats', type=int, default=2,
help="number of action repeats")
parser.add_argument('--steps-per-repeat', type=int, default=5,
help="number of sim steps per repeat")
parser.add_argument('--max-episode-len', type=int, default=200,
help="maximum episode length for motion")
parser.add_argument('--random-initial-position', action='store_true',
help="Should the morphology start in random initial position?")
def state_fields_of_pose_of(body_id, link_id=-1):  # frequently used helper to get the pose (position + orientation) of the base or a link
if link_id == -1:
(x,y,z), (a,b,c,d) = p.getBasePositionAndOrientation(body_id)
else:
(x,y,z), (a,b,c,d),_,_,_,_ = p.getLinkState(body_id, link_id)
return np.array([x,y,z,a,b,c,d])
def state_fields_of_pv_of(body_id, vHelper, link_id=-1):
if link_id == -1:
(x,y,z), (a,b,c,d) = p.getBasePositionAndOrientation(body_id)
(vx,vy,vz), (va,vb,vc) = p.getBaseVelocity(body_id, 0)
else:
(x,y,z), (a,b,c,d),_,_,_,_ = p.getLinkState(body_id, link_id)
o = vHelper.getVelocities()
(vx,vy,vz), (va,vb,vc) = (x-o[link_id+1][0],y-o[link_id+1][1],z-o[link_id+1][2]), (a-o[link_id+1][3],b-o[link_id+1][4],c-o[link_id+1][5])
return np.array([x,y,z,a,b,c,d,vx,vy,vz,va,vb,vc])
def get_discrete_action_space_of(body_id):
num_joints = p.getNumJoints(body_id)
    return spaces.Discrete(3 ** num_joints)  # here is the state explosion: 3 choices per joint
def get_continuous_action_space_of(body_id): # the continuous version avoids the state explosion!
num_joints = p.getNumJoints(body_id)
return spaces.Box(-1.0, 1.0, shape=(num_joints,))
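# Illustrative helper (not used by the environment below): it shows how a single
# discrete action index decodes into one of {-1, 0, 1} per joint, mirroring the
# base-3 decoding performed in Motionv0Env._step() and the state explosion of
# 3 ** num_joints noted above.
def decode_discrete_action_example(action, num_joints):
    """Return a list with one entry in {-1, 0, 1} per joint."""
    signs = []
    for _ in range(num_joints):
        signs.append(action % 3 - 1)   # ternary digit -> -1/0/+1
        action //= 3
    return signs
# e.g. with 2 joints: action 5 -> [1, 0], action 8 -> [1, 1]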
class Motionv0Env(gym.Env):
def __init__(self, opts):
self.gui = opts.gui
self.max_episode_len = opts.max_episode_len
self.delay = opts.delay if self.gui else 0.0
self.metadata = {
'discrete_actions' : True,
'continuous_actions' : True,
'render.modes': ['human', 'rgb_array'],
            'video.frames_per_second': 25
}
# do some parameter setting from your parser arguments and add other configurations
self.control_type = opts.control_type
# force to apply per action simulation step.
# in the discrete case this is the fixed gain applied
# in the continuous case each x/y is in range (-G, G)
self.action_gain = opts.action_gain # scale of control (position, velocity)
self.action_force = opts.action_force # Newton
        # how many times to repeat each action per step().
        # and how many sim steps to do per state capture
        # (total number of sim steps = action_repeats * steps_per_repeat)
self.repeats = opts.action_repeats
self.steps_per_repeat = opts.steps_per_repeat
self.random_initial_position = opts.random_initial_position
# setup bullet
p.connect(p.GUI if self.gui else p.DIRECT)
p.setGravity(0, 0, opts.gravity_force) # maybe you need gravity?
p.loadURDF("envs/models/ground.urdf", 0,0,0, 0,0,0,1)
# load your models
if opts.morphology_type == 1:
self.body = p.loadURDF("envs/models/simple-snakev0.urdf",0,0,2, 0,0,0,1)
elif opts.morphology_type == 2:
self.body = p.loadURDF("envs/models/springy-snakev0.urdf",0,0,2, 0,0,0,1)
elif opts.morphology_type == 3:
self.body = p.loadURDF("envs/models/phantomx/phantomx.urdf",0,0,2, 0,0,0,1)
self.initPosition, self.initOrientation = p.getBasePositionAndOrientation(self.body)
self.num_joints = p.getNumJoints(self.body)
self.velocityHelper = VelocityHelper(self.body)
self.reward = RewardFunction(self.body, RewardFunction.PositionReward, RewardFunction.XAxis) # velocity in X axis dimension gets rewarded
# in the low dimensional case obs space for problem is (R, num_links, 13)
# R = number of repeats
# num joints + 1 = number of links of snake
# 13d tuple for pos + orientation + velocities (angular velocity in euler)
self.state_shape = (self.repeats, p.getNumJoints(self.body)+1, 13)
# no state until reset.
self.state = np.empty(self.state_shape, dtype=np.float32)
def configureActions(self, discrete_actions):
# if it is possible to switch actions, do this here
# true if action space is discrete
# false if action space is continuous
self.discrete_actions = discrete_actions
if self.discrete_actions:
self.action_space = get_discrete_action_space_of(self.body)
else:
self.action_space = get_continuous_action_space_of(self.body)
# Our observations can be within this box
float_max = np.finfo(np.float32).max
self.observation_space = gym.spaces.Box(-float_max, float_max, self.state_shape)
if self.discrete_actions:
if self.control_type == "position-control":
if self.action_gain == -1:
self.action_gain = 0.5 * np.pi
elif self.control_type == "velocity-control":
if self.action_gain == -1:
                    self.action_gain = 1
elif self.control_type == "torque-control":
if self.action_gain == -1:
                    self.action_gain = 1.5  # torque control is even weirder
else:
if self.control_type == "position-control":
if self.action_gain == -1:
self.action_gain = np.pi
elif self.control_type == "velocity-control":
if self.action_gain == -1:
                    self.action_gain = 1
elif self.control_type == "torque-control":
if self.action_gain == -1:
                    self.action_gain = 0  # torque control is weird
def _configure(self, display=None):
pass
def _seed(self, seed=None):
pass
def _render(self, mode='human', close=False):
pass
def _step(self, action):
if self.done:
print >>sys.stderr, "Why is step called when the episode is done?"
return np.copy(self.state), 0, True, {}
# choose your next action
# do some out of bounds checks to reset the environment and agent
# calculate the reward for your agent
info = {}
# check if action is NaN
if np.isnan(action).any():
print 'action is NaN'
info['done_reason'] = 'action is NaN'
reward = 0
self.done = True
else:
# based on action decide the actions for each joint
            joint_actions = -np.ones(self.num_joints) * self.action_gain  # default to -1 * gain for joints not reached by the decoder
if self.discrete_actions: # the actions come out of one number encoding the actions to apply
# example: (state explosion 3^2)
# action 0 => (-1,-1) # action 1 => ( 0,-1) # action 2 => ( 1,-1)
# action 3 => (-1, 0) # action 4 => ( 0, 0) # action 5 => ( 1, 0)
# action 6 => (-1, 1) # action 7 => ( 0, 1) # action 8 => ( 1, 1)
for joint in xrange(self.num_joints): # finds out the action to apply for each joint
if(action == 0):
break
action_sign = np.mod(action,3) - 1
action = np.floor_divide(action, 3)
                    joint_actions[joint] = action_sign * self.action_gain
else: # continuous actions
joint_actions = action * self.action_gain
# step simulation forward. at the end of each repeat we set part of the step's
# state by capturing the cart & pole state in some form.
for r in xrange(self.repeats):
for _ in xrange(self.steps_per_repeat):
p.stepSimulation()
for joint in xrange(self.num_joints):
if self.control_type == "position-control":
p.setJointMotorControl2(self.body,joint,p.POSITION_CONTROL, targetPosition = joint_actions[joint], force = self.action_force)
elif self.control_type == "velocity-control":
p.setJointMotorControl2(self.body,joint,p.VELOCITY_CONTROL, targetVelocity = joint_actions[joint], force = self.action_force)
elif self.control_type == "torque-control":
p.setJointMotorControl2(self.body,joint,p.TORQUE_CONTROL, force = joint_actions[joint])
if self.delay > 0:
time.sleep(self.delay)
self.set_state_element_for_repeat(r)
self.steps += 1
# check for end of episode (by length)
if self.steps >= self.max_episode_len:
info['done_reason'] = 'episode length'
self.done = True
# calc reward
reward = self.reward.getReward()
# check if reward is NaN
if np.isnan(reward):
info['done_reason'] = 'reward is NaN'
reward = 0
self.done = True
# return observation
return np.copy(self.state), reward, self.done, info
def set_state_element_for_repeat(self, repeat):
        # in low dim case state is (R, num_links, 13)
        # R -> repeat, num_links -> N objects (all links), 13 -> 13d pos + orientation + velocity
for link in xrange(-1, self.num_joints):
self.state[repeat][link] = state_fields_of_pv_of(self.body, self.velocityHelper, link) # get position, orientation and velocity of link
def _reset(self):
# reset your environment
# reset state
self.steps = 0
self.done = False
# reset morphology
p.resetBasePositionAndOrientation(self.body, self.initPosition, self.initOrientation) # reset body position and orientation
resetPosition = 0
for joint in xrange(self.num_joints):
if self.random_initial_position:
resetPosition = np.random.random() * 2 * np.pi - np.pi
p.resetJointState(self.body, joint, resetPosition) # reset joint position of joints
for _ in xrange(100): p.stepSimulation()
# bootstrap state by running for all repeats
for i in xrange(self.repeats):
self.set_state_element_for_repeat(i)
# return this state
return np.copy(self.state)
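# Illustrative sketch (not part of the original module): wiring the parser
# options to the environment. Note that the class also reads opts.gui, which
# add_opts() above does not define, so it is added here explicitly; the URDF
# paths under envs/models/ are assumed to exist on disk.
def _example_run_random_episode():
    import argparse
    parser = argparse.ArgumentParser()
    add_opts(parser)
    parser.add_argument('--gui', action='store_true')  # consumed by Motionv0Env
    opts = parser.parse_args([])  # defaults: phantomx morphology, position control
    env = Motionv0Env(opts)
    env.configureActions(discrete_actions=False)
    state = env._reset()
    total_reward = 0.0
    for _ in range(opts.max_episode_len):
        action = env.action_space.sample()
        state, reward, done, info = env._step(action)
        total_reward += reward
        if done:
            break
    return total_reward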
|
|
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File for convert.py testing all assertions."""
# pylint: disable=bad-continuation
# pylint: disable=line-too-long
# pylint: disable=undefined-variable
AssertThat(equal_a0).IsEqualTo(equal_b0)
AssertThat(equal_t0).IsTrue()
AssertThat(equal_f0).IsFalse()
AssertThat(equal_n0).IsNone()
AssertThat(equal_n1).IsEqualTo(26)
AssertThat(equal_n2).IsEqualTo(27)
AssertThat(equal_n3).IsEqualTo(2.8)
AssertThat(equal_n4).IsEqualTo(-29)
AssertThat(equal_n5).IsEqualTo(30L)
AssertThat(equal_n6).IsEqualTo(-3.1)
AssertThat(equal_z0).IsZero()
AssertThat(equal_z1).IsZero()
AssertThat(function_call()).IsEqualTo(equal_m0)
AssertThat(equal_long_method(arg0, (tuple_e0, tuple_e1),
[list(list_e0, list_e1)])).IsEqualTo(equal_long_method({dict_k0: dict_v0,
dict_k1: (tuple_e0, tuple_e1)}))
AssertThat(equal_ra0).IsEqualTo('equal_rb0')
AssertThat(equal_ra1).IsEqualTo('equal_rb1')
AssertThat(equal_ra2).IsEqualTo("equal_rb2")
AssertThat(equal_ra3).IsEqualTo("equal_rb3")
AssertThat(actual_ra4).IsEqualTo(equal_rb4)
AssertThat(actual_ra5).IsEqualTo(equal_rb5)
AssertThat(equal_ra6).IsEqualTo(expected_rb6)
AssertThat(equal_ra7).IsEqualTo(expected_rb7)
AssertThat(result_ra8).IsEqualTo(equal_rb8)
AssertThat(result_ra9).IsEqualTo(equal_rb9)
AssertThat(os.environ['ENV0']).IsEqualTo(equal_rba)
AssertThat(os.environ['ENV1']).IsEqualTo(equal_rbb)
AssertThat(os.environ.get('ENV2')).IsEqualTo(equal_rbc)
AssertThat(os.environ.get('ENV3')).IsEqualTo(equal_rbd)
AssertThat(equal_rae).HasSize(55)
AssertThat(equal_raf).HasSize(56)
AssertThat(equal_rag).IsEmpty()
AssertThat(equal_rah).IsEmpty()
AssertThat(equal_e0).IsEmpty()
AssertThat(equal_e1).IsEmpty()
AssertThat(equal_e2).IsEmpty()
AssertThat(equal_e3).IsEmpty()
AssertThat(equal_e4).IsEmpty()
AssertThat(equal_e5).IsEmpty()
AssertThat(equal_e6).IsEmpty()
AssertThat(equal_e7).IsEmpty()
AssertThat(equal_e8).IsEmpty()
AssertThat(equal_e9).IsEmpty()
AssertThat(equal_ea).IsEmpty()
AssertThat(equal_eb).IsEmpty()
AssertThat(equal_ec).IsEmpty()
AssertThat(equal_ed).IsEmpty()
AssertThat(equal_ee).IsEmpty()
AssertThat(equal_ef).IsEmpty()
AssertThat(equal_eg).IsEmpty()
AssertThat(equal_eh).IsEmpty()
AssertThat(equal_ei).IsEmpty()
AssertThat(equal_ej).IsEmpty()
AssertThat(equal_ek).IsEmpty()
AssertThat(equal_el).IsEmpty()
AssertThat(equal_em).IsEmpty()
AssertThat(equal_en).IsEmpty()
AssertThat(equal_eo).IsEmpty()
AssertThat(equal_ep).IsEmpty()
AssertThat(equal_eq).IsEmpty()
AssertThat(equal_er).IsEmpty()
AssertThat(equal_es).IsEmpty()
AssertThat(equal_et).IsEmpty()
AssertThat(equal_l0).ContainsExactly(equal_b0, equal_c0).InOrder()
AssertThat(equal_l1).ContainsExactly(equal_b1, equal_c1).InOrder()
AssertThat(equal_l2).ContainsExactly(equal_b2, equal_c2).InOrder()
AssertThat(equal_l3).ContainsExactly(equal_b3, equal_c3).InOrder()
AssertThat(equal_l4).ContainsExactlyElementsIn(equal_b4 for equal_c4 in equal_d4).InOrder()
AssertThat(equal_l5).ContainsExactlyElementsIn(equal_b5 for equal_c5 in equal_d5).InOrder()
AssertThat(equal_l6).ContainsExactlyElementsIn(equal_b6 for equal_c6 in equal_d6)
AssertThat(equal_l7).ContainsExactly(equal_b7)
AssertThat(equal_l8).ContainsExactlyElementsIn(equal_b8 for equal_c8 in equal_d8).InOrder()
AssertThat(equal_l9).ContainsExactlyElementsIn(equal_b9 for equal_c9 in equal_d9).InOrder()
AssertThat(equal_la).ContainsExactlyElementsIn(equal_ba for equal_ca in equal_da)
AssertThat(equal_lb).ContainsExactly(equal_bb)
AssertThat(equal_len_a0).HasSize(equal_len_b0)
AssertThat(equal_len_a1).HasSize(103)
AssertThat(equal_len_a2).IsEmpty()
AssertThat(empty_container_a0).IsEmpty()
AssertThat(not_empty_container_a0).IsNotEmpty()
AssertThat(dict_subset_a0.items()).ContainsAllIn(dict_subset_b0.items())
AssertThat(dict_equal_a0).ContainsExactlyItemsIn(dict_equal_b0)
AssertThat(dict_equal_e0).IsEmpty()
AssertThat(dict_equal_e1).IsEmpty()
AssertThat(dict_equal_e2).IsEmpty()
AssertThat(dict_equal_e3).IsEmpty()
AssertThat(dict_equal_e4).IsEmpty()
AssertThat(dict_equal_e5).IsEmpty()
AssertThat(dict_equal_a6).ContainsExactlyItemsIn({dict_equal_b6: dict_equal_c6})
AssertThat(dict_equal_a7).ContainsExactlyItemsIn({dict_equal_b7: dict_equal_c7})
AssertThat(sorted(count_equal_a0)).ContainsExactlyElementsIn(sorted(count_equal_b0)).InOrder()
AssertThat(sorted(count_equal_a1)).ContainsExactlyElementsIn(sorted([count_equal_b1, count_equal_c1])).InOrder()
AssertThat(sorted(count_equal_a2)).ContainsExactlyElementsIn(sorted((count_equal_b2, count_equal_c2))).InOrder()
AssertThat(sorted(count_equal_a3)).ContainsExactlyElementsIn(sorted([count_equal_b3, count_equal_c3])).InOrder()
AssertThat(sorted(count_equal_a4)).ContainsExactlyElementsIn(sorted((count_equal_b4, count_equal_c4))).InOrder()
AssertThat(sorted(count_equal_a5)).ContainsExactlyElementsIn(sorted([count_equal_b5])).InOrder()
AssertThat(sorted(items_equal_a0)).ContainsExactlyElementsIn(sorted(items_equal_b0)).InOrder()
AssertThat(sorted(items_equal_a1)).ContainsExactlyElementsIn(sorted([items_equal_b1, items_equal_c1])).InOrder()
AssertThat(sorted(items_equal_a2)).ContainsExactlyElementsIn(sorted((items_equal_b2, items_equal_c2))).InOrder()
AssertThat(sorted(items_equal_a3)).ContainsExactlyElementsIn(sorted([items_equal_b3, items_equal_c3])).InOrder()
AssertThat(sorted(items_equal_a4)).ContainsExactlyElementsIn(sorted((items_equal_b4, items_equal_c4))).InOrder()
AssertThat(sorted(items_equal_a5)).ContainsExactlyElementsIn(sorted([items_equal_b5])).InOrder()
AssertThat(list_equal_a0).ContainsExactlyElementsIn(list_equal_b0).InOrder()
AssertThat(list_equal_l1).ContainsExactly(list_equal_b1, list_equal_c1).InOrder()
AssertThat(list_equal_l2).ContainsExactly(list_equal_b2, list_equal_c2).InOrder()
AssertThat(list_equal_l3).ContainsExactly(list_equal_b3)
AssertThat(sequence_equal_a0).ContainsExactlyElementsIn(sequence_equal_b0).InOrder()
AssertThat(sequence_equal_a1).ContainsExactly(sequence_equal_b1, sequence_equal_c1).InOrder()
AssertThat(sequence_equal_a2).ContainsExactly(sequence_equal_b2, sequence_equal_c2).InOrder()
AssertThat(sequence_equal_a3).ContainsExactly(sequence_equal_b3, sequence_equal_c3).InOrder()
AssertThat(sequence_equal_a4).ContainsExactly(sequence_equal_b4, sequence_equal_c4).InOrder()
AssertThat(sequence_equal_a5).ContainsExactly(sequence_equal_b5)
AssertThat(set_equal_a0).ContainsExactlyElementsIn(set_equal_b0)
AssertThat(set_equal_a1).ContainsExactly(set_equal_b1, set_equal_c1)
AssertThat(set_equal_a2).ContainsExactly(set_equal_b2, set_equal_c2)
AssertThat(tuple_equal_a0).ContainsExactlyElementsIn(tuple_equal_b0).InOrder()
AssertThat(tuple_equal_a1).ContainsExactly(tuple_equal_b1, tuple_equal_c1).InOrder()
AssertThat(tuple_equal_a2).ContainsExactly(tuple_equal_b2, tuple_equal_c2).InOrder()
AssertThat(same_elements_a0).ContainsExactlyElementsIn(same_elements_b0)
AssertThat(same_elements_a1).ContainsExactly(same_elements_b1, same_elements_c1)
AssertThat(same_elements_a2).ContainsExactly(same_elements_b2, same_elements_c2)
AssertThat(same_elements_a3).ContainsExactly(same_elements_b3, same_elements_c3)
AssertThat(same_elements_a4).ContainsExactly(same_elements_b4, same_elements_c4)
AssertThat(same_elements_a5).ContainsExactly(same_elements_b5)
AssertThat(equal_a1).IsEqualTo(equal_b1)
AssertThat(not_equal_a0).IsNotEqualTo(not_equal_b0)
AssertThat(not_equal_a1).IsNotEqualTo(not_equal_b1)
AssertThat(not_equal_t0).IsFalse()
AssertThat(not_equal_f0).IsTrue()
AssertThat(not_equal_n0).IsNotNone()
AssertThat(not_equal_n1).IsNotEqualTo(138)
AssertThat(not_equal_n2).IsNotEqualTo(139)
AssertThat(not_equal_n3).IsNotEqualTo(14.0)
AssertThat(not_equal_n4).IsNotEqualTo(-141)
AssertThat(not_equal_n5).IsNotEqualTo(142L)
AssertThat(not_equal_n6).IsNotEqualTo(-14.3)
AssertThat(not_equal_z0).IsNonZero()
AssertThat(not_equal_z1).IsNonZero()
AssertThat(function_call()).IsNotEqualTo(not_equal_m0)
AssertThat(not_equal_ra0).IsNotEqualTo('not_equal_rb0')
AssertThat(not_equal_ra1).IsNotEqualTo('not_equal_rb1')
AssertThat(not_equal_ra2).IsNotEqualTo("not_equal_rb2")
AssertThat(not_equal_ra3).IsNotEqualTo("not_equal_rb3")
AssertThat(actual_ra4).IsNotEqualTo(not_equal_rb4)
AssertThat(actual_ra5).IsNotEqualTo(not_equal_rb5)
AssertThat(not_equal_ra6).IsNotEqualTo(expected_rb6)
AssertThat(not_equal_ra7).IsNotEqualTo(expected_rb7)
AssertThat(result_ra8).IsNotEqualTo(not_equal_rb8)
AssertThat(result_ra9).IsNotEqualTo(not_equal_rb9)
AssertThat(os.environ['ENV0']).IsNotEqualTo(not_equal_rba)
AssertThat(os.environ['ENV1']).IsNotEqualTo(not_equal_rbb)
AssertThat(os.environ.get('ENV2')).IsNotEqualTo(not_equal_rbc)
AssertThat(os.environ.get('ENV3')).IsNotEqualTo(not_equal_rbd)
AssertThat(len(not_equal_rae)).IsNotEqualTo(162)
AssertThat(len(not_equal_raf)).IsNotEqualTo(163)
AssertThat(not_equal_rag).IsNotEmpty()
AssertThat(not_equal_rah).IsNotEmpty()
AssertThat(not_equal_e0).IsNotEmpty()
AssertThat(not_equal_e1).IsNotEmpty()
AssertThat(not_equal_e2).IsNotEmpty()
AssertThat(not_equal_e3).IsNotEmpty()
AssertThat(not_equal_e4).IsNotEmpty()
AssertThat(not_equal_e5).IsNotEmpty()
AssertThat(not_equal_e6).IsNotEmpty()
AssertThat(not_equal_e7).IsNotEmpty()
AssertThat(not_equal_e8).IsNotEmpty()
AssertThat(not_equal_e9).IsNotEmpty()
AssertThat(not_equal_ea).IsNotEmpty()
AssertThat(not_equal_eb).IsNotEmpty()
AssertThat(not_equal_ec).IsNotEmpty()
AssertThat(not_equal_ed).IsNotEmpty()
AssertThat(not_equal_ee).IsNotEmpty()
AssertThat(not_equal_ef).IsNotEmpty()
AssertThat(not_equal_eg).IsNotEmpty()
AssertThat(not_equal_eh).IsNotEmpty()
AssertThat(not_equal_ei).IsNotEmpty()
AssertThat(not_equal_ej).IsNotEmpty()
AssertThat(not_equal_ek).IsNotEmpty()
AssertThat(not_equal_el).IsNotEmpty()
AssertThat(not_equal_em).IsNotEmpty()
AssertThat(not_equal_en).IsNotEmpty()
AssertThat(not_equal_eo).IsNotEmpty()
AssertThat(not_equal_ep).IsNotEmpty()
AssertThat(not_equal_eq).IsNotEmpty()
AssertThat(not_equal_er).IsNotEmpty()
AssertThat(not_equal_es).IsNotEmpty()
AssertThat(not_equal_et).IsNotEmpty()
AssertThat(underscore).IsTrue()
AssertThat(true_a0).IsTrue()
AssertThat(false_a0).IsFalse()
AssertThat(less_a0).IsLessThan(less_b0)
AssertThat(less_a1).IsLessThan(203)
AssertThat(less_a2).IsGreaterThan(204)
AssertThat(less_equal_a0).IsAtMost(less_equal_b0)
AssertThat(less_equal_a1).IsAtMost(207)
AssertThat(less_equal_a2).IsAtLeast(208)
AssertThat(greater_a0).IsGreaterThan(greater_b0)
AssertThat(greater_a1).IsGreaterThan(211)
AssertThat(greater_a2).IsLessThan(212)
AssertThat(greater_equal_a0).IsAtLeast(greater_equal_b0)
AssertThat(greater_equal_a1).IsAtLeast(215)
AssertThat(greater_equal_a2).IsAtMost(216)
AssertThat(is_a0).IsSameAs(is_b0)
AssertThat(is_not_a0).IsNotSameAs(is_not_b0)
AssertThat(is_none_a0).IsNone()
AssertThat(is_not_none_a0).IsNotNone()
AssertThat(is_instance_a0).IsInstanceOf(is_instance_b0)
AssertThat(is_not_instance_a0).IsNotInstanceOf(is_not_instance_b0)
AssertThat(in_a0).IsIn(in_b0)
AssertThat(in_a1).IsAnyOf(in_b1, in_c1)
AssertThat(in_a2).IsAnyOf(in_b2, in_c2)
AssertThat(not_in_a0).IsNotIn(not_in_b0)
AssertThat(not_in_a1).IsNoneOf(not_in_b1, not_in_c1)
AssertThat(not_in_a2).IsNoneOf(not_in_b2, not_in_c2)
AssertThat(starts_a0).StartsWith('starts_b0')
AssertThat(starts_a1).StartsWith("starts_b1")
AssertThat(starts_a2).StartsWith(r'starts_b2')
AssertThat(starts_a3).StartsWith(u"starts_b3")
AssertThat(starts_a4).StartsWith(r"starts_b4")
AssertThat(starts_a5).StartsWith(u'starts_b5')
AssertThat(ends_a0).EndsWith('ends_b0')
AssertThat(ends_a1).EndsWith("ends_b1")
AssertThat(ends_a2).EndsWith(r'ends_b2')
AssertThat(ends_a3).EndsWith(u"ends_b3")
AssertThat(ends_a4).EndsWith(r"ends_b4")
AssertThat(ends_a5).EndsWith(u'ends_b5')
AssertThat(regex_a0).ContainsMatch(regex_b0)
AssertThat(regexp_matches_a0).ContainsMatch(regexp_matches_b0)
AssertThat(not_regex_a0).DoesNotContainMatch(not_regex_b0)
AssertThat(not_regexp_matches_a0).DoesNotContainMatch(not_regexp_matches_b0)
with AssertThat(raises_a0).IsRaised():
MethodThatRaises()
with AssertThat(raises_a1).IsRaised():
MethodThatRaises()
with AssertThat(raises_a2).IsRaised():
MethodThatRaises(raises_b2, raises_c2)
with AssertThat(raises_regexp_a0).IsRaised(matching=raises_regexp_b0):
MethodThatRaisesRegexp()
with AssertThat(raises_regexp_a1).IsRaised(matching=raises_regexp_b1):
MethodThatRaisesRegexp()
with AssertThat(raises_regexp_a2).IsRaised(matching=raises_regexp_b2):
MethodThatRaisesRegexp(raises_regexp_c2, raises_regexp_d2)
with AssertThat(raises_with_regexp_match_a0).IsRaised(matching=raises_with_regexp_match_b0):
MethodThatRaisesRegexp()
with AssertThat(raises_with_regexp_match_a1).IsRaised(matching=raises_with_regexp_match_b1):
MethodThatRaisesRegexp()
with AssertThat(raises_with_regexp_match_a2).IsRaised(matching=raises_with_regexp_match_b2):
MethodThatRaisesRegexp(raises_with_regexp_match_c2, raises_with_regexp_match_d2)
AssertThat(mock_method).WasCalled()
AssertThat(mock_method).WasNotCalled()
AssertThat(mock_method).WasCalled().Once()
AssertThat(mock_method_a0).WasNotCalled()
AssertThat(mock_method_b0).WasNotCalled()
AssertThat(mock_method_a1).WasCalled().Once()
AssertThat(mock_method_b1).WasCalled().Once()
AssertThat(mock_method_c0).WasCalled().Times(334)
AssertThat(mock_method_c1).WasCalled().Times(335)
AssertThat(mock_method).WasCalled().LastWith(arg_a1, arg_a2, kwarg_a1=val_a1, kwarg_a2=val_a2)
AssertThat(mock_method).WasCalled().Once().With(arg_b1, arg_b2, kwarg_b1=val_b1, kwarg_b2=val_b2)
AssertThat(mock_method).WasCalled().With(arg_c1, arg_c2, kwarg_c1=val_c1, kwarg2_c2=val_c2)
AssertThat(mock_method).HasCalls([call_a1, call_a2]).InOrder()
AssertThat(mock_method).HasCalls([call_b1, call_b2]).InOrder()
AssertThat(mock_method).HasCalls([call_c1, call_c2]).InOrder()
AssertThat(mock_method).HasCalls([call_d1, call_d2]).InOrder()
AssertThat(mock_method).HasCalls([call_e1, call_e2])
AssertThat(mock_method).HasCalls([call_f1, call_f2])
# pylint: enable=bad-continuation
# pylint: enable=line-too-long
# pylint: enable=undefined-variable
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from collections import defaultdict
from enum import Enum
import numpy as np
import pandas as pd
import six
from rqalpha.const import EXIT_CODE, ACCOUNT_TYPE
from rqalpha.events import EVENT
from rqalpha.interface import AbstractMod
from rqalpha.utils.risk import Risk
class AnalyserMod(AbstractMod):
def __init__(self):
self._env = None
self._mod_config = None
self._enabled = False
self._orders = []
self._trades = []
self._total_portfolios = []
self._total_benchmark_portfolios = []
self._sub_accounts = defaultdict(list)
self._positions = defaultdict(list)
self._benchmark_daily_returns = []
self._portfolio_daily_returns = []
def start_up(self, env, mod_config):
self._env = env
self._mod_config = mod_config
self._enabled = (self._mod_config.record or self._mod_config.plot or self._mod_config.output_file or
self._mod_config.plot_save_file or self._mod_config.report_save_path)
if self._enabled:
env.event_bus.add_listener(EVENT.POST_SETTLEMENT, self._collect_daily)
env.event_bus.add_listener(EVENT.TRADE, self._collect_trade)
env.event_bus.add_listener(EVENT.ORDER_CREATION_PASS, self._collect_order)
def _collect_trade(self, event):
self._trades.append(self._to_trade_record(event.trade))
def _collect_order(self, event):
self._orders.append(event.order)
def _collect_daily(self, event):
date = self._env.calendar_dt.date()
portfolio = self._env.portfolio
benchmark_portfolio = self._env.benchmark_portfolio
self._portfolio_daily_returns.append(portfolio.daily_returns)
self._total_portfolios.append(self._to_portfolio_record(date, portfolio))
if benchmark_portfolio is None:
self._benchmark_daily_returns.append(0)
else:
self._benchmark_daily_returns.append(benchmark_portfolio.daily_returns)
self._total_benchmark_portfolios.append(self._to_portfolio_record(date, benchmark_portfolio))
for account_type, account in six.iteritems(self._env.portfolio.accounts):
self._sub_accounts[account_type].append(self._to_account_record(date, account))
for order_book_id, position in six.iteritems(account.positions):
self._positions[account_type].append(self._to_position_record(date, order_book_id, position))
def _symbol(self, order_book_id):
return self._env.data_proxy.instruments(order_book_id).symbol
@staticmethod
def _safe_convert(value, ndigits=3):
if isinstance(value, Enum):
return value.name
        if isinstance(value, (float, np.float64, np.float32, np.float16)):
return round(value, ndigits)
return value
def _to_portfolio_record(self, date, portfolio):
return {
'date': date,
'cash': self._safe_convert(portfolio.cash),
'total_returns': self._safe_convert(portfolio.total_returns),
'daily_returns': self._safe_convert(portfolio.daily_returns),
'daily_pnl': self._safe_convert(portfolio.daily_pnl),
'total_value': self._safe_convert(portfolio.total_value),
'market_value': self._safe_convert(portfolio.market_value),
'annualized_returns': self._safe_convert(portfolio.annualized_returns),
'unit_net_value': self._safe_convert(portfolio.unit_net_value),
'units': portfolio.units,
'static_unit_net_value': self._safe_convert(portfolio.static_unit_net_value),
}
ACCOUNT_FIELDS_MAP = {
ACCOUNT_TYPE.STOCK: ['dividend_receivable'],
ACCOUNT_TYPE.FUTURE: ['holding_pnl', 'realized_pnl', 'daily_pnl', 'margin'],
}
def _to_account_record(self, date, account):
data = {
'date': date,
'total_cash': self._safe_convert(account.cash + account.frozen_cash),
'transaction_cost': self._safe_convert(account.transaction_cost),
'market_value': self._safe_convert(account.market_value),
'total_value': self._safe_convert(account.total_value),
}
for f in self.ACCOUNT_FIELDS_MAP[account.type]:
data[f] = self._safe_convert(getattr(account, f))
return data
POSITION_FIELDS_MAP = {
ACCOUNT_TYPE.STOCK: [
'quantity', 'last_price', 'avg_price', 'market_value', 'sellable'
],
ACCOUNT_TYPE.FUTURE: [
'pnl', 'daily_pnl', 'holding_pnl', 'realized_pnl', 'margin', 'market_value',
'buy_pnl', 'sell_pnl', 'closable_buy_quantity', 'buy_margin', 'buy_today_quantity',
'buy_avg_open_price', 'buy_avg_holding_price', 'closable_sell_quantity',
'sell_margin', 'sell_today_quantity', 'sell_quantity', 'sell_avg_open_price',
'sell_avg_holding_price'
],
}
def _to_position_record(self, date, order_book_id, position):
data = {
'order_book_id': order_book_id,
'symbol': self._symbol(order_book_id),
'date': date,
}
for f in self.POSITION_FIELDS_MAP[position.type]:
data[f] = self._safe_convert(getattr(position, f))
return data
def _to_trade_record(self, trade):
return {
'datetime': trade.datetime.strftime("%Y-%m-%d %H:%M:%S"),
'trading_datetime': trade.trading_datetime.strftime("%Y-%m-%d %H:%M:%S"),
'order_book_id': trade.order_book_id,
'symbol': self._symbol(trade.order_book_id),
'side': self._safe_convert(trade.side),
'position_effect': self._safe_convert(trade.position_effect),
'exec_id': trade.exec_id,
'tax': trade.tax,
'commission': trade.commission,
'last_quantity': trade.last_quantity,
'last_price': self._safe_convert(trade.last_price),
'order_id': trade.order_id,
'transaction_cost': trade.transaction_cost,
}
def tear_down(self, code, exception=None):
if code != EXIT_CODE.EXIT_SUCCESS or not self._enabled:
return
strategy_name = os.path.basename(self._env.config.base.strategy_file).split(".")[0]
data_proxy = self._env.data_proxy
summary = {
'strategy_name': strategy_name,
'start_date': self._env.config.base.start_date.strftime('%Y-%m-%d'),
'end_date': self._env.config.base.end_date.strftime('%Y-%m-%d'),
'strategy_file': self._env.config.base.strategy_file,
'securities': self._env.config.base.securities,
'run_type': self._env.config.base.run_type.value,
'stock_starting_cash': self._env.config.base.stock_starting_cash,
'future_starting_cash': self._env.config.base.future_starting_cash,
}
risk = Risk(np.array(self._portfolio_daily_returns), np.array(self._benchmark_daily_returns),
data_proxy.get_risk_free_rate(self._env.config.base.start_date, self._env.config.base.end_date),
(self._env.config.base.end_date - self._env.config.base.start_date).days + 1)
summary.update({
'alpha': self._safe_convert(risk.alpha, 3),
'beta': self._safe_convert(risk.beta, 3),
'sharpe': self._safe_convert(risk.sharpe, 3),
'information_ratio': self._safe_convert(risk.information_ratio, 3),
'downside_risk': self._safe_convert(risk.annual_downside_risk, 3),
'tracking_error': self._safe_convert(risk.annual_tracking_error, 3),
'sortino': self._safe_convert(risk.sortino, 3),
'volatility': self._safe_convert(risk.annual_volatility, 3),
'max_drawdown': self._safe_convert(risk.max_drawdown, 3),
})
summary.update({
'total_value': self._safe_convert(self._env.portfolio.total_value),
'cash': self._safe_convert(self._env.portfolio.cash),
'total_returns': self._safe_convert(self._env.portfolio.total_returns),
'annualized_returns': self._safe_convert(self._env.portfolio.annualized_returns),
'unit_net_value': self._safe_convert(self._env.portfolio.unit_net_value),
'units': self._env.portfolio.units,
})
if self._env.benchmark_portfolio:
summary['benchmark_total_returns'] = self._safe_convert(self._env.benchmark_portfolio.total_returns)
summary['benchmark_annualized_returns'] = self._safe_convert(
self._env.benchmark_portfolio.annualized_returns)
trades = pd.DataFrame(self._trades)
if 'datetime' in trades.columns:
trades = trades.set_index('datetime')
df = pd.DataFrame(self._total_portfolios)
df['date'] = pd.to_datetime(df['date'])
total_portfolios = df.set_index('date').sort_index()
result_dict = {
'summary': summary,
'trades': trades,
'total_portfolios': total_portfolios,
}
if self._env.benchmark_portfolio is not None:
b_df = pd.DataFrame(self._total_benchmark_portfolios)
            b_df['date'] = pd.to_datetime(b_df['date'])
benchmark_portfolios = b_df.set_index('date').sort_index()
result_dict['benchmark_portfolios'] = benchmark_portfolios
if self._env.plot_store is not None:
plots = self._env.get_plot_store().get_plots()
plots_items = defaultdict(dict)
for series_name, value_dict in six.iteritems(plots):
for date, value in six.iteritems(value_dict):
plots_items[date][series_name] = value
plots_items[date]["date"] = date
df = pd.DataFrame([dict_data for date, dict_data in six.iteritems(plots_items)])
df["date"] = pd.to_datetime(df["date"])
df = df.set_index("date").sort_index()
result_dict["plots"] = df
for account_type, account in six.iteritems(self._env.portfolio.accounts):
account_name = account_type.name.lower()
portfolios_list = self._sub_accounts[account_type]
df = pd.DataFrame(portfolios_list)
df["date"] = pd.to_datetime(df["date"])
portfolios_df = df.set_index("date").sort_index()
result_dict["{}_portfolios".format(account_name)] = portfolios_df
positions_list = self._positions[account_type]
positions_df = pd.DataFrame(positions_list)
if "date" in positions_df.columns:
positions_df["date"] = pd.to_datetime(positions_df["date"])
positions_df = positions_df.set_index("date").sort_index()
result_dict["{}_positions".format(account_name)] = positions_df
if self._mod_config.output_file:
with open(self._mod_config.output_file, 'wb') as f:
pickle.dump(result_dict, f)
if self._mod_config.report_save_path:
from .report import generate_report
generate_report(result_dict, self._mod_config.report_save_path)
if self._mod_config.plot or self._mod_config.plot_save_file:
from .plot import plot_result
plot_result(result_dict, self._mod_config.plot, self._mod_config.plot_save_file)
return result_dict
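# Illustrative sketch (not part of the original module): when mod_config.output_file
# is set, tear_down() pickles result_dict to that path. The dump can be read back
# like this (the file name is an assumption for the example):
def _example_load_analyser_output(path="result.pkl"):
    with open(path, 'rb') as f:
        result = pickle.load(f)
    # keys written above: 'summary', 'trades', 'total_portfolios', plus optional
    # 'benchmark_portfolios', 'plots' and per-account portfolio/position frames
    print(result['summary'].get('total_returns'))
    return result['total_portfolios']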
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.dataproc.v1",
manifest={
"AutoscalingPolicy",
"BasicAutoscalingAlgorithm",
"BasicYarnAutoscalingConfig",
"InstanceGroupAutoscalingPolicyConfig",
"CreateAutoscalingPolicyRequest",
"GetAutoscalingPolicyRequest",
"UpdateAutoscalingPolicyRequest",
"DeleteAutoscalingPolicyRequest",
"ListAutoscalingPoliciesRequest",
"ListAutoscalingPoliciesResponse",
},
)
class AutoscalingPolicy(proto.Message):
r"""Describes an autoscaling policy for Dataproc cluster
autoscaler.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
id (str):
Required. The policy id.
The id must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). Cannot begin or end with
underscore or hyphen. Must consist of between 3 and 50
characters.
name (str):
Output only. The "resource name" of the autoscaling policy,
as described in
https://cloud.google.com/apis/design/resource_names.
- For ``projects.regions.autoscalingPolicies``, the
resource name of the policy has the following format:
``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}``
- For ``projects.locations.autoscalingPolicies``, the
resource name of the policy has the following format:
``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}``
basic_algorithm (google.cloud.dataproc_v1.types.BasicAutoscalingAlgorithm):
This field is a member of `oneof`_ ``algorithm``.
worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig):
Required. Describes how the autoscaler will
operate for primary workers.
secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig):
Optional. Describes how the autoscaler will
operate for secondary workers.
labels (Sequence[google.cloud.dataproc_v1.types.AutoscalingPolicy.LabelsEntry]):
Optional. The labels to associate with this autoscaling
policy. Label **keys** must contain 1 to 63 characters, and
must conform to `RFC
1035 <https://www.ietf.org/rfc/rfc1035.txt>`__. Label
**values** may be empty, but, if present, must contain 1 to
63 characters, and must conform to `RFC
1035 <https://www.ietf.org/rfc/rfc1035.txt>`__. No more than
32 labels can be associated with an autoscaling policy.
"""
id = proto.Field(proto.STRING, number=1,)
name = proto.Field(proto.STRING, number=2,)
basic_algorithm = proto.Field(
proto.MESSAGE, number=3, oneof="algorithm", message="BasicAutoscalingAlgorithm",
)
worker_config = proto.Field(
proto.MESSAGE, number=4, message="InstanceGroupAutoscalingPolicyConfig",
)
secondary_worker_config = proto.Field(
proto.MESSAGE, number=5, message="InstanceGroupAutoscalingPolicyConfig",
)
labels = proto.MapField(proto.STRING, proto.STRING, number=6,)
class BasicAutoscalingAlgorithm(proto.Message):
r"""Basic algorithm for autoscaling.
Attributes:
yarn_config (google.cloud.dataproc_v1.types.BasicYarnAutoscalingConfig):
Required. YARN autoscaling configuration.
cooldown_period (google.protobuf.duration_pb2.Duration):
Optional. Duration between scaling events. A scaling period
starts after the update operation from the previous event
has completed.
Bounds: [2m, 1d]. Default: 2m.
"""
yarn_config = proto.Field(
proto.MESSAGE, number=1, message="BasicYarnAutoscalingConfig",
)
cooldown_period = proto.Field(
proto.MESSAGE, number=2, message=duration_pb2.Duration,
)
class BasicYarnAutoscalingConfig(proto.Message):
r"""Basic autoscaling configurations for YARN.
Attributes:
graceful_decommission_timeout (google.protobuf.duration_pb2.Duration):
Required. Timeout for YARN graceful decommissioning of Node
Managers. Specifies the duration to wait for jobs to
complete before forcefully removing workers (and potentially
interrupting jobs). Only applicable to downscaling
operations.
Bounds: [0s, 1d].
scale_up_factor (float):
Required. Fraction of average YARN pending memory in the
last cooldown period for which to add workers. A scale-up
factor of 1.0 will result in scaling up so that there is no
pending memory remaining after the update (more aggressive
scaling). A scale-up factor closer to 0 will result in a
smaller magnitude of scaling up (less aggressive scaling).
See `How autoscaling
works <https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works>`__
for more information.
Bounds: [0.0, 1.0].
scale_down_factor (float):
Required. Fraction of average YARN pending memory in the
last cooldown period for which to remove workers. A
scale-down factor of 1 will result in scaling down so that
there is no available memory remaining after the update
(more aggressive scaling). A scale-down factor of 0 disables
removing workers, which can be beneficial for autoscaling a
single job. See `How autoscaling
works <https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works>`__
for more information.
Bounds: [0.0, 1.0].
scale_up_min_worker_fraction (float):
Optional. Minimum scale-up threshold as a fraction of total
cluster size before scaling occurs. For example, in a
20-worker cluster, a threshold of 0.1 means the autoscaler
must recommend at least a 2-worker scale-up for the cluster
to scale. A threshold of 0 means the autoscaler will scale
up on any recommended change.
Bounds: [0.0, 1.0]. Default: 0.0.
scale_down_min_worker_fraction (float):
Optional. Minimum scale-down threshold as a fraction of
total cluster size before scaling occurs. For example, in a
20-worker cluster, a threshold of 0.1 means the autoscaler
must recommend at least a 2 worker scale-down for the
cluster to scale. A threshold of 0 means the autoscaler will
scale down on any recommended change.
Bounds: [0.0, 1.0]. Default: 0.0.
"""
graceful_decommission_timeout = proto.Field(
proto.MESSAGE, number=5, message=duration_pb2.Duration,
)
scale_up_factor = proto.Field(proto.DOUBLE, number=1,)
scale_down_factor = proto.Field(proto.DOUBLE, number=2,)
scale_up_min_worker_fraction = proto.Field(proto.DOUBLE, number=3,)
scale_down_min_worker_fraction = proto.Field(proto.DOUBLE, number=4,)
class InstanceGroupAutoscalingPolicyConfig(proto.Message):
r"""Configuration for the size bounds of an instance group,
including its proportional size to other groups.
Attributes:
min_instances (int):
Optional. Minimum number of instances for this group.
Primary workers - Bounds: [2, max_instances]. Default: 2.
Secondary workers - Bounds: [0, max_instances]. Default: 0.
max_instances (int):
Required. Maximum number of instances for this group.
Required for primary workers. Note that by default, clusters
will not use secondary workers. Required for secondary
workers if the minimum secondary instances is set.
Primary workers - Bounds: [min_instances, ). Secondary
workers - Bounds: [min_instances, ). Default: 0.
weight (int):
Optional. Weight for the instance group, which is used to
determine the fraction of total workers in the cluster from
this instance group. For example, if primary workers have
weight 2, and secondary workers have weight 1, the cluster
will have approximately 2 primary workers for each secondary
worker.
The cluster may not reach the specified balance if
constrained by min/max bounds or other autoscaling settings.
For example, if ``max_instances`` for secondary workers is
0, then only primary workers will be added. The cluster can
also be out of balance when created.
If weight is not set on any instance group, the cluster will
default to equal weight for all groups: the cluster will
attempt to maintain an equal number of workers in each group
within the configured size bounds for each group. If weight
is set for one group only, the cluster will default to zero
weight on the unset group. For example if weight is set only
on primary workers, the cluster will use primary workers
only and no secondary workers.
"""
min_instances = proto.Field(proto.INT32, number=1,)
max_instances = proto.Field(proto.INT32, number=2,)
weight = proto.Field(proto.INT32, number=3,)
class CreateAutoscalingPolicyRequest(proto.Message):
r"""A request to create an autoscaling policy.
Attributes:
parent (str):
Required. The "resource name" of the region or location, as
described in
https://cloud.google.com/apis/design/resource_names.
- For ``projects.regions.autoscalingPolicies.create``, the
resource name of the region has the following format:
``projects/{project_id}/regions/{region}``
- For ``projects.locations.autoscalingPolicies.create``,
the resource name of the location has the following
format: ``projects/{project_id}/locations/{location}``
policy (google.cloud.dataproc_v1.types.AutoscalingPolicy):
Required. The autoscaling policy to create.
"""
parent = proto.Field(proto.STRING, number=1,)
policy = proto.Field(proto.MESSAGE, number=2, message="AutoscalingPolicy",)
class GetAutoscalingPolicyRequest(proto.Message):
r"""A request to fetch an autoscaling policy.
Attributes:
name (str):
Required. The "resource name" of the autoscaling policy, as
described in
https://cloud.google.com/apis/design/resource_names.
- For ``projects.regions.autoscalingPolicies.get``, the
resource name of the policy has the following format:
``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}``
- For ``projects.locations.autoscalingPolicies.get``, the
resource name of the policy has the following format:
``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}``
"""
name = proto.Field(proto.STRING, number=1,)
class UpdateAutoscalingPolicyRequest(proto.Message):
r"""A request to update an autoscaling policy.
Attributes:
policy (google.cloud.dataproc_v1.types.AutoscalingPolicy):
Required. The updated autoscaling policy.
"""
policy = proto.Field(proto.MESSAGE, number=1, message="AutoscalingPolicy",)
class DeleteAutoscalingPolicyRequest(proto.Message):
r"""A request to delete an autoscaling policy.
Autoscaling policies in use by one or more clusters will not be
deleted.
Attributes:
name (str):
Required. The "resource name" of the autoscaling policy, as
described in
https://cloud.google.com/apis/design/resource_names.
- For ``projects.regions.autoscalingPolicies.delete``, the
resource name of the policy has the following format:
``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}``
- For ``projects.locations.autoscalingPolicies.delete``,
the resource name of the policy has the following format:
``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListAutoscalingPoliciesRequest(proto.Message):
r"""A request to list autoscaling policies in a project.
Attributes:
parent (str):
Required. The "resource name" of the region or location, as
described in
https://cloud.google.com/apis/design/resource_names.
- For ``projects.regions.autoscalingPolicies.list``, the
resource name of the region has the following format:
``projects/{project_id}/regions/{region}``
- For ``projects.locations.autoscalingPolicies.list``, the
resource name of the location has the following format:
``projects/{project_id}/locations/{location}``
page_size (int):
Optional. The maximum number of results to
return in each response. Must be less than or
equal to 1000. Defaults to 100.
page_token (str):
Optional. The page token, returned by a
previous call, to request the next page of
results.
"""
parent = proto.Field(proto.STRING, number=1,)
page_size = proto.Field(proto.INT32, number=2,)
page_token = proto.Field(proto.STRING, number=3,)
class ListAutoscalingPoliciesResponse(proto.Message):
r"""A response to a request to list autoscaling policies in a
project.
Attributes:
policies (Sequence[google.cloud.dataproc_v1.types.AutoscalingPolicy]):
Output only. Autoscaling policies list.
next_page_token (str):
Output only. This token is included in the
response if there are more results to fetch.
"""
@property
def raw_page(self):
return self
policies = proto.RepeatedField(
proto.MESSAGE, number=1, message="AutoscalingPolicy",
)
next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
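# Illustrative sketch (not part of the generated module): constructing an
# AutoscalingPolicy from the message types defined above. Field values are
# arbitrary examples chosen within the documented bounds.
def _example_build_policy():
    yarn = BasicYarnAutoscalingConfig(
        graceful_decommission_timeout=duration_pb2.Duration(seconds=3600),
        scale_up_factor=0.5,
        scale_down_factor=1.0,
    )
    return AutoscalingPolicy(
        id="example-policy",
        basic_algorithm=BasicAutoscalingAlgorithm(
            yarn_config=yarn,
            cooldown_period=duration_pb2.Duration(seconds=120),
        ),
        worker_config=InstanceGroupAutoscalingPolicyConfig(
            min_instances=2, max_instances=10, weight=1,
        ),
    )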
|
|
""" Utility functions for algebra etc """
import itertools
import math
import numpy as np
import numpy.linalg as npl
# epsilon for testing whether a number is close to zero
_EPS = np.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def sphere2cart(r, theta, phi):
""" Spherical to Cartesian coordinates
This is the standard physics convention where `theta` is the
inclination (polar) angle, and `phi` is the azimuth angle.
Imagine a sphere with center (0,0,0). Orient it with the z axis
running south-north, the y axis running west-east and the x axis
from posterior to anterior. `theta` (the inclination angle) is the
angle to rotate from the z-axis (the zenith) around the y-axis,
towards the x axis. Thus the rotation is counter-clockwise from the
point of view of positive y. `phi` (azimuth) gives the angle of
rotation around the z-axis towards the y axis. The rotation is
counter-clockwise from the point of view of positive z.
Equivalently, given a point P on the sphere, with coordinates x, y,
z, `theta` is the angle between P and the z-axis, and `phi` is
the angle between the projection of P onto the XY plane, and the X
axis.
Geographical nomenclature designates theta as 'co-latitude', and phi
as 'longitude'
Parameters
------------
r : array_like
radius
theta : array_like
inclination or polar angle
phi : array_like
azimuth angle
Returns
---------
x : array
       x coordinate(s) in Cartesian space
y : array
y coordinate(s) in Cartesian space
z : array
z coordinate
Notes
--------
See these pages:
* http://en.wikipedia.org/wiki/Spherical_coordinate_system
* http://mathworld.wolfram.com/SphericalCoordinates.html
for excellent discussion of the many different conventions
possible. Here we use the physics conventions, used in the
wikipedia page.
Derivations of the formulae are simple. Consider a vector x, y, z of
length r (norm of x, y, z). The inclination angle (theta) can be
found from: cos(theta) == z / r -> z == r * cos(theta). This gives
the hypotenuse of the projection onto the XY plane, which we will
call Q. Q == r*sin(theta). Now x / Q == cos(phi) -> x == r *
sin(theta) * cos(phi) and so on.
We have deliberately named this function ``sphere2cart`` rather than
``sph2cart`` to distinguish it from the Matlab function of that
name, because the Matlab function uses an unusual convention for the
angles that we did not want to replicate. The Matlab function is
trivial to implement with the formulae given in the Matlab help.
"""
sin_theta = np.sin(theta)
x = r * np.cos(phi) * sin_theta
y = r * np.sin(phi) * sin_theta
z = r * np.cos(theta)
x, y, z = np.broadcast_arrays(x, y, z)
return x, y, z
def cart2sphere(x, y, z):
r""" Return angles for Cartesian 3D coordinates `x`, `y`, and `z`
See doc for ``sphere2cart`` for angle conventions and derivation
of the formulae.
$0\le\theta\mathrm{(theta)}\le\pi$ and $-\pi\le\phi\mathrm{(phi)}\le\pi$
Parameters
------------
x : array_like
x coordinate in Cartesian space
y : array_like
y coordinate in Cartesian space
z : array_like
z coordinate
Returns
---------
r : array
radius
theta : array
inclination (polar) angle
phi : array
azimuth angle
"""
r = np.sqrt(x * x + y * y + z * z)
theta = np.arccos(np.divide(z, r, where=r > 0))
theta = np.where(r > 0, theta, 0.)
phi = np.arctan2(y, x)
r, theta, phi = np.broadcast_arrays(r, theta, phi)
return r, theta, phi
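# Illustrative sketch (not part of the original module): a quick numerical check
# that cart2sphere() inverts sphere2cart() for points away from the origin,
# following the angle conventions described in the docstrings above.
def _example_sphere_cart_roundtrip():
    rng = np.random.RandomState(0)
    r = rng.uniform(0.5, 2.0, size=10)
    theta = rng.uniform(0, np.pi, size=10)
    phi = rng.uniform(-np.pi, np.pi, size=10)
    x, y, z = sphere2cart(r, theta, phi)
    r2, theta2, phi2 = cart2sphere(x, y, z)
    assert np.allclose(r, r2) and np.allclose(theta, theta2) and np.allclose(phi, phi2)
    return True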
def sph2latlon(theta, phi):
"""Convert spherical coordinates to latitude and longitude.
Returns
-------
lat, lon : ndarray
Latitude and longitude.
"""
return np.rad2deg(theta - np.pi / 2), np.rad2deg(phi - np.pi)
def normalized_vector(vec, axis=-1):
""" Return vector divided by its Euclidean (L2) norm
See :term:`unit vector` and :term:`Euclidean norm`
Parameters
------------
vec : array_like shape (3,)
Returns
----------
nvec : array shape (3,)
vector divided by L2 norm
Examples
-----------
>>> vec = [1, 2, 3]
>>> l2n = np.sqrt(np.dot(vec, vec))
>>> nvec = normalized_vector(vec)
>>> np.allclose(np.array(vec) / l2n, nvec)
True
>>> vec = np.array([[1, 2, 3]])
>>> vec.shape == (1, 3)
True
>>> normalized_vector(vec).shape == (1, 3)
True
"""
return vec / vector_norm(vec, axis, keepdims=True)
def vector_norm(vec, axis=-1, keepdims=False):
""" Return vector Euclidean (L2) norm
See :term:`unit vector` and :term:`Euclidean norm`
Parameters
-------------
vec : array_like
Vectors to norm.
axis : int
Axis over which to norm. By default norm over last axis. If `axis` is
None, `vec` is flattened then normed.
keepdims : bool
If True, the output will have the same number of dimensions as `vec`,
with shape 1 on `axis`.
Returns
---------
norm : array
Euclidean norms of vectors.
Examples
--------
>>> import numpy as np
>>> vec = [[8, 15, 0], [0, 36, 77]]
>>> vector_norm(vec)
array([ 17., 85.])
>>> vector_norm(vec, keepdims=True)
array([[ 17.],
[ 85.]])
>>> vector_norm(vec, axis=0)
array([ 8., 39., 77.])
"""
vec = np.asarray(vec)
vec_norm = np.sqrt((vec * vec).sum(axis))
if keepdims:
if axis is None:
shape = [1] * vec.ndim
else:
shape = list(vec.shape)
shape[axis] = 1
vec_norm = vec_norm.reshape(shape)
return vec_norm
def rodrigues_axis_rotation(r, theta):
""" Rodrigues formula
Rotation matrix for rotation around axis r for angle theta.
The rotation matrix is given by the Rodrigues formula:
R = Id + sin(theta)*Sn + (1-cos(theta))*Sn^2
with::
0 -nz ny
Sn = nz 0 -nx
-ny nx 0
where n = r / ||r||
In case the angle ||r|| is very small, the above formula may lead
to numerical instabilities. We instead use a Taylor expansion
around theta=0:
    R = I + sin(theta)/theta * Sr + (1 - cos(theta))/theta^2 * Sr^2
    leading to:
    R = I + (1 - theta^2/6) * Sr + (1/2 - theta^2/24) * Sr^2
Parameters
-----------
r : array_like shape (3,), axis
theta : float, angle in degrees
Returns
----------
R : array, shape (3,3), rotation matrix
Examples
---------
>>> import numpy as np
>>> from dipy.core.geometry import rodrigues_axis_rotation
>>> v=np.array([0,0,1])
>>> u=np.array([1,0,0])
>>> R=rodrigues_axis_rotation(v,40)
>>> ur=np.dot(R,u)
>>> np.round(np.rad2deg(np.arccos(np.dot(ur,u))))
40.0
"""
theta = np.deg2rad(theta)
if theta > 1e-30:
n = r / np.linalg.norm(r)
Sn = np.array([[0, -n[2], n[1]], [n[2], 0, -n[0]], [-n[1], n[0], 0]])
R = np.eye(3) + np.sin(theta) * Sn + \
(1 - np.cos(theta)) * np.dot(Sn, Sn)
else:
Sr = np.array([[0, -r[2], r[1]], [r[2], 0, -r[0]], [-r[1], r[0], 0]])
theta2 = theta * theta
R = np.eye(3) + (1 - theta2 / 6.) * \
Sr + (.5 - theta2 / 24.) * np.dot(Sr, Sr)
return R
def nearest_pos_semi_def(B):
""" Least squares positive semi-definite tensor estimation
Parameters
------------
B : (3,3) array_like
B matrix - symmetric. We do not check the symmetry.
Returns
---------
npds : (3,3) array
Estimated nearest positive semi-definite array to matrix `B`.
Examples
----------
>>> B = np.diag([1, 1, -1])
>>> nearest_pos_semi_def(B)
array([[ 0.75, 0. , 0. ],
[ 0. , 0.75, 0. ],
[ 0. , 0. , 0. ]])
References
----------
.. [1] Niethammer M, San Jose Estepar R, Bouix S, Shenton M, Westin CF.
On diffusion tensor estimation. Conf Proc IEEE Eng Med Biol Soc.
2006;1:2622-5. PubMed PMID: 17946125; PubMed Central PMCID:
PMC2791793.
"""
B = np.asarray(B)
vals, vecs = npl.eigh(B)
# indices of eigenvalues in descending order
inds = np.argsort(vals)[::-1]
vals = vals[inds]
cardneg = np.sum(vals < 0)
if cardneg == 0:
return B
if cardneg == 3:
return np.zeros((3, 3))
lam1a, lam2a, lam3a = vals
scalers = np.zeros((3,))
if cardneg == 2:
b112 = np.max([0, lam1a + (lam2a + lam3a) / 3.])
scalers[0] = b112
elif cardneg == 1:
lam1b = lam1a + 0.25 * lam3a
lam2b = lam2a + 0.25 * lam3a
if lam1b >= 0 and lam2b >= 0:
scalers[:2] = lam1b, lam2b
else: # one of the lam1b, lam2b is < 0
if lam2b < 0:
b111 = np.max([0, lam1a + (lam2a + lam3a) / 3.])
scalers[0] = b111
if lam1b < 0:
b221 = np.max([0, lam2a + (lam1a + lam3a) / 3.])
scalers[1] = b221
# resort the scalers to match the original vecs
scalers = scalers[np.argsort(inds)]
return np.dot(vecs, np.dot(np.diag(scalers), vecs.T))
def sphere_distance(pts1, pts2, radius=None, check_radius=True):
""" Distance across sphere surface between `pts1` and `pts2`
Parameters
------------
pts1 : (N,R) or (R,) array_like
where N is the number of points and R is the number of
coordinates defining a point (``R==3`` for 3D)
pts2 : (N,R) or (R,) array_like
where N is the number of points and R is the number of
coordinates defining a point (``R==3`` for 3D). It should be
possible to broadcast `pts1` against `pts2`
radius : None or float, optional
Radius of sphere. Default is to work out radius from mean of the
length of each point vector
check_radius : bool, optional
If True, check if the points are on the sphere surface - i.e
check if the vector lengths in `pts1` and `pts2` are close to
`radius`. Default is True.
Returns
---------
d : (N,) or (0,) array
Distances between corresponding points in `pts1` and `pts2`
across the spherical surface, i.e. the great circle distance
See also
----------
cart_distance : cartesian distance between points
vector_cosine : cosine of angle between vectors
Examples
----------
>>> print('%.4f' % sphere_distance([0,1],[1,0]))
1.5708
>>> print('%.4f' % sphere_distance([0,3],[3,0]))
4.7124
"""
pts1 = np.asarray(pts1)
pts2 = np.asarray(pts2)
lens1 = np.sqrt(np.sum(pts1 ** 2, axis=-1))
lens2 = np.sqrt(np.sum(pts2 ** 2, axis=-1))
if radius is None:
radius = (np.mean(lens1) + np.mean(lens2)) / 2.0
if check_radius:
if not (np.allclose(radius, lens1) and
np.allclose(radius, lens2)):
raise ValueError('Radii do not match sphere surface')
# Get angle with vector cosine
dots = np.inner(pts1, pts2)
lens = lens1 * lens2
angle_cos = np.arccos(dots / lens)
return angle_cos * radius
def cart_distance(pts1, pts2):
""" Cartesian distance between `pts1` and `pts2`
If either of `pts1` or `pts2` is 2D, then we take the first
dimension to index points, and the second indexes coordinate. More
generally, we take the last dimension to be the coordinate
dimension.
Parameters
----------
pts1 : (N,R) or (R,) array_like
where N is the number of points and R is the number of
coordinates defining a point (``R==3`` for 3D)
pts2 : (N,R) or (R,) array_like
where N is the number of points and R is the number of
coordinates defining a point (``R==3`` for 3D). It should be
possible to broadcast `pts1` against `pts2`
Returns
-------
d : (N,) or (0,) array
Cartesian distances between corresponding points in `pts1` and
`pts2`
See also
--------
sphere_distance : distance between points on sphere surface
Examples
----------
>>> cart_distance([0,0,0], [0,0,3])
3.0
"""
sqs = np.subtract(pts1, pts2) ** 2
return np.sqrt(np.sum(sqs, axis=-1))
def vector_cosine(vecs1, vecs2):
""" Cosine of angle between two (sets of) vectors
The cosine of the angle between two vectors ``v1`` and ``v2`` is
given by the inner product of ``v1`` and ``v2`` divided by the
product of the vector lengths::
v_cos = np.inner(v1, v2) / (np.sqrt(np.sum(v1**2)) *
np.sqrt(np.sum(v2**2)))
Parameters
-------------
vecs1 : (N, R) or (R,) array_like
N vectors (as rows) or single vector. Vectors have R elements.
    vecs2 : (N, R) or (R,) array_like
N vectors (as rows) or single vector. Vectors have R elements.
It should be possible to broadcast `vecs1` against `vecs2`
Returns
----------
vcos : (N,) or (0,) array
Vector cosines. To get the angles you will need ``np.arccos``
Notes
--------
The vector cosine will be the same as the correlation only if all
the input vectors have zero mean.
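    Examples
    --------
    A minimal sanity-check sketch: orthogonal unit vectors have zero cosine
    and any vector has cosine 1 with itself.
    >>> import numpy as np
    >>> float(vector_cosine([1, 0, 0], [0, 1, 0]))
    0.0
    >>> np.allclose(vector_cosine([1, 1, 0], [1, 1, 0]), 1)
    True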
"""
vecs1 = np.asarray(vecs1)
vecs2 = np.asarray(vecs2)
lens1 = np.sqrt(np.sum(vecs1 ** 2, axis=-1))
lens2 = np.sqrt(np.sum(vecs2 ** 2, axis=-1))
dots = np.inner(vecs1, vecs2)
lens = lens1 * lens2
return dots / lens
def lambert_equal_area_projection_polar(theta, phi):
r""" Lambert Equal Area Projection from polar sphere to plane
Return positions in (y1,y2) plane corresponding to the points
with polar coordinates (theta, phi) on the unit sphere, under the
Lambert Equal Area Projection mapping (see Mardia and Jupp (2000),
Directional Statistics, p. 161).
See doc for ``sphere2cart`` for angle conventions
- $0 \le \theta \le \pi$ and $0 \le \phi \le 2 \pi$
- $|(y_1,y_2)| \le 2$
The Lambert EAP maps the upper hemisphere to the planar disc of radius 1
and the lower hemisphere to the planar annulus between radii 1 and 2,
and *vice versa*.
Parameters
----------
theta : array_like
theta spherical coordinates
phi : array_like
phi spherical coordinates
Returns
---------
y : (N,2) array
planar coordinates of points following mapping by Lambert's EAP.
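    Examples
    --------
    A minimal sketch: the pole (theta=0) maps to the origin and a point on
    the equator (theta=pi/2, phi=0) maps to (sqrt(2), 0).
    >>> import numpy as np
    >>> y = lambert_equal_area_projection_polar(np.array([0., np.pi / 2]),
    ...                                         np.array([0., 0.]))
    >>> np.allclose(y, [[0, 0], [np.sqrt(2), 0]])
    True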
"""
return 2 * np.repeat(np.sin(theta / 2), 2).reshape((theta.shape[0], 2)) * \
np.column_stack((np.cos(phi), np.sin(phi)))
def lambert_equal_area_projection_cart(x, y, z):
r""" Lambert Equal Area Projection from cartesian vector to plane
Return positions in $(y_1,y_2)$ plane corresponding to the
directions of the vectors with cartesian coordinates xyz under the
Lambert Equal Area Projection mapping (see Mardia and Jupp (2000),
Directional Statistics, p. 161).
    The Lambert EAP maps the upper hemisphere to the planar disc of radius 1
    and the lower hemisphere to the planar annulus between radii 1 and 2,
    and *vice versa*.
See doc for ``sphere2cart`` for angle conventions
Parameters
------------
x : array_like
        x coordinate in Cartesian space
y : array_like
y coordinate in Cartesian space
z : array_like
z coordinate
Returns
----------
y : (N,2) array
planar coordinates of points following mapping by Lambert's EAP.
"""
(r, theta, phi) = cart2sphere(x, y, z)
return lambert_equal_area_projection_polar(theta, phi)
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
    Code modified from the work of Christoph Gohlke, available at
    http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
Parameters
------------
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
Returns
---------
matrix : ndarray (4, 4)
Examples
--------
>>> import numpy
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... _ = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... _ = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i + parity]
k = _NEXT_AXIS[i - parity + 1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci * ck, ci * sk
sc, ss = si * ck, si * sk
M = np.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj * si
M[i, k] = sj * ci
M[j, i] = sj * sk
M[j, j] = -cj * ss + cc
M[j, k] = -cj * cs - sc
M[k, i] = -sj * ck
M[k, j] = cj * sc + cs
M[k, k] = cj * cc - ss
else:
M[i, i] = cj * ck
M[i, j] = sj * sc - cs
M[i, k] = sj * cc + ss
M[j, i] = cj * sk
M[j, j] = sj * ss + cc
M[j, k] = sj * cs - sc
M[k, i] = -sj
M[k, j] = cj * si
M[k, k] = cj * ci
return M
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return 4x4 transformation matrix from sequence of
transformations.
    Code modified from the work of Christoph Gohlke, available at
    http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
This is the inverse of the ``decompose_matrix`` function.
Parameters
-------------
scale : (3,) array_like
Scaling factors.
shear : array_like
Shear factors for x-y, x-z, y-z axes.
angles : array_like
Euler angles about static x, y, z axes.
translate : array_like
Translation vector along x, y, z axes.
perspective : array_like
Perspective partition of matrix.
Returns
---------
matrix : 4x4 array
Examples
----------
>>> import math
>>> import numpy as np
>>> import dipy.core.geometry as gm
>>> scale = np.random.random(3) - 0.5
>>> shear = np.random.random(3) - 0.5
>>> angles = (np.random.random(3) - 0.5) * (2*math.pi)
>>> trans = np.random.random(3) - 0.5
>>> persp = np.random.random(4) - 0.5
>>> M0 = gm.compose_matrix(scale, shear, angles, trans, persp)
"""
M = np.identity(4)
if perspective is not None:
P = np.identity(4)
P[3, :] = perspective[:4]
M = np.dot(M, P)
if translate is not None:
T = np.identity(4)
T[:3, 3] = translate[:3]
M = np.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = np.dot(M, R)
if shear is not None:
Z = np.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = np.dot(M, Z)
if scale is not None:
S = np.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = np.dot(M, S)
M /= M[3, 3]
return M
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
    Code modified from the excellent work of Christoph Gohlke, available at
    http://www.lfd.uci.edu/~gohlke/code/transformations.py.html
Parameters
------------
matrix : array_like
Non-degenerative homogeneous transformation matrix
Returns
---------
scale : (3,) ndarray
Three scaling factors.
shear : (3,) ndarray
Shear factors for x-y, x-z, y-z axes.
angles : (3,) ndarray
Euler angles about static x, y, z axes.
translate : (3,) ndarray
Translation vector along x, y, z axes.
perspective : ndarray
Perspective partition of matrix.
Raises
------
ValueError
If matrix is of wrong type or degenerative.
Examples
-----------
>>> import numpy as np
>>> T0=np.diag([2,1,1,1])
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
"""
M = np.array(matrix, dtype=np.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0, 0, 0, 1
if not np.linalg.det(P):
raise ValueError("matrix is singular")
scale = np.zeros((3, ), dtype=np.float64)
    shear = np.zeros((3, ), dtype=np.float64)
    angles = np.zeros((3, ), dtype=np.float64)
if any(abs(M[:3, 3]) > _EPS):
perspective = np.dot(M[:, 3], np.linalg.inv(P.T))
M[:, 3] = 0, 0, 0, 1
else:
perspective = np.array((0, 0, 0, 1), dtype=np.float64)
translate = M[3, :3].copy()
M[3, :3] = 0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = np.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = np.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = np.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
shear[1:] /= scale[2]
if np.dot(row[0], np.cross(row[1], row[2])) < 0:
scale *= -1
row *= -1
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
# angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def circumradius(a, b, c):
""" a, b and c are 3-dimensional vectors which are the vertices of a
triangle. The function returns the circumradius of the triangle, i.e
the radius of the smallest circle that can contain the triangle. In
the degenerate case when the 3 points are collinear it returns
half the distance between the furthest apart points.
Parameters
----------
a, b, c : (3,) array_like
the three vertices of the triangle
Returns
-------
circumradius : float
the desired circumradius
"""
x = a - c
xx = np.linalg.norm(x) ** 2
y = b - c
yy = np.linalg.norm(y) ** 2
z = np.cross(x, y)
# test for collinearity
if np.linalg.norm(z) == 0:
        return np.sqrt(np.max([np.dot(x, x), np.dot(y, y),
                               np.dot(a - b, a - b)])) / 2.
else:
m = np.vstack((x, y, z))
w = np.dot(np.linalg.inv(m.T), np.array([xx / 2., yy / 2., 0]))
return np.linalg.norm(w) / 2.
def vec2vec_rotmat(u, v):
r""" rotation matrix from 2 unit vectors
    u, v being unit 3d vectors, return a 3x3 rotation matrix R that aligns u to
    v.
    In general there are many rotations that will map u to v. If S is any
    rotation using v as an axis then S.R will also map u to v since (S.R)u =
    S(Ru) = Sv = v. The rotation R returned by vec2vec_rotmat leaves fixed the
perpendicular to the plane spanned by u and v.
The transpose of R will align v to u.
Parameters
-----------
u : array, shape(3,)
v : array, shape(3,)
Returns
---------
R : array, shape(3,3)
Examples
---------
>>> import numpy as np
>>> from dipy.core.geometry import vec2vec_rotmat
>>> u=np.array([1,0,0])
>>> v=np.array([0,1,0])
>>> R=vec2vec_rotmat(u,v)
>>> np.dot(R,u)
array([ 0., 1., 0.])
>>> np.dot(R.T,v)
array([ 1., 0., 0.])
"""
# Cross product is the first step to find R
# Rely on numpy instead of manual checking for failing
# cases
w = np.cross(u, v)
wn = np.linalg.norm(w)
# Check that cross product is OK and vectors
# u, v are not collinear (norm(w)>0.0)
if np.isnan(wn) or wn < np.finfo(float).eps:
norm_u_v = np.linalg.norm(u - v)
# This is the case of two antipodal vectors:
# ** former checking assumed norm(u) == norm(v)
if norm_u_v > np.linalg.norm(u):
return -np.eye(3)
return np.eye(3)
# if everything ok, normalize w
w = w / wn
# vp is in plane of u,v, perpendicular to u
vp = (v - (np.dot(u, v) * u))
vp = vp / np.linalg.norm(vp)
# (u vp w) is an orthonormal basis
P = np.array([u, vp, w])
Pt = P.T
cosa = np.dot(u, v)
sina = np.sqrt(1 - cosa ** 2)
R = np.array([[cosa, -sina, 0], [sina, cosa, 0], [0, 0, 1]])
Rp = np.dot(Pt, np.dot(R, P))
# make sure that you don't return any Nans
# check using the appropriate tool in numpy
if np.any(np.isnan(Rp)):
return np.eye(3)
return Rp
def compose_transformations(*mats):
""" Compose multiple 4x4 affine transformations in one 4x4 matrix
Parameters
-----------
mat1 : array, (4, 4)
mat2 : array, (4, 4)
...
matN : array, (4, 4)
Returns
-------
matN x ... x mat2 x mat1 : array, (4, 4)
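    Examples
    --------
    A minimal doctest sketch: the first matrix passed in is the first one
    applied, so composing two translations equals ``np.dot(B, A)``.
    >>> import numpy as np
    >>> A = np.eye(4); A[0, 3] = 1.0   # translate +1 along x
    >>> B = np.eye(4); B[1, 3] = 2.0   # translate +2 along y
    >>> np.allclose(compose_transformations(A, B), np.dot(B, A))
    True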
"""
    if len(mats) < 2:
        raise ValueError('At least two matrices are needed')
    prev = mats[0]
for mat in mats[1:]:
prev = np.dot(mat, prev)
return prev
def perpendicular_directions(v, num=30, half=False):
r""" Computes n evenly spaced perpendicular directions relative to a given
vector v
Parameters
-----------
v : array (3,)
Array containing the three cartesian coordinates of vector v
num : int, optional
Number of perpendicular directions to generate
half : bool, optional
If half is True, perpendicular directions are sampled on half of the
        unit circumference perpendicular to v, otherwise perpendicular
directions are sampled on the full circumference. Default of half is
False
Returns
-------
psamples : array (n, 3)
array of vectors perpendicular to v
Notes
--------
Perpendicular directions are estimated using the following two step
procedure:
1) the perpendicular directions are first sampled in a unit
circumference parallel to the plane normal to the x-axis.
2) Samples are then rotated and aligned to the plane normal to vector
    v. The rotational matrix for this rotation is constructed as a reference
    frame basis whose axes are the following:
- The first axis is vector v
- The second axis is defined as the normalized vector given by the
cross product between vector v and the unit vector aligned to the
x-axis
- The third axis is defined as the cross product between the
previous computed vector and vector v.
    Following these two steps, the coordinates of the final perpendicular directions
are given as:
.. math::
\left [ -\sin(a_{i}) \sqrt{{v_{y}}^{2}+{v_{z}}^{2}}
\; , \;
\frac{v_{x}v_{y}\sin(a_{i})-v_{z}\cos(a_{i})}
{\sqrt{{v_{y}}^{2}+{v_{z}}^{2}}}
\; , \;
        \frac{v_{x}v_{z}\sin(a_{i})+v_{y}\cos(a_{i})}
{\sqrt{{v_{y}}^{2}+{v_{z}}^{2}}} \right ]
This procedure has a singularity when vector v is aligned to the x-axis. To
solve this singularity, perpendicular directions in procedure's step 1 are
defined in the plane normal to y-axis and the second axis of the rotated
frame of reference is computed as the normalized vector given by the cross
product between vector v and the unit vector aligned to the y-axis.
Following this, the coordinates of the perpendicular directions are given
    as:
    .. math::
\left [ -\frac{\left (v_{x}v_{y}\sin(a_{i})+v_{z}\cos(a_{i}) \right )}
{\sqrt{{v_{x}}^{2}+{v_{z}}^{2}}}
\; , \;
\sin(a_{i}) \sqrt{{v_{x}}^{2}+{v_{z}}^{2}}
\; , \;
        \frac{v_{x}\cos(a_{i})-v_{y}v_{z}\sin(a_{i})}
{\sqrt{{v_{x}}^{2}+{v_{z}}^{2}}} \right ]
    For more details on this calculation, see `here <http://gsoc2015dipydki.blogspot.it/2015/07/rnh-post-8-computing-perpendicular.html>`_.
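    Examples
    --------
    A minimal sanity-check sketch: directions generated for the z-axis are
    unit vectors orthogonal to it.
    >>> import numpy as np
    >>> pdirs = perpendicular_directions(np.array([0., 0., 1.]), num=4)
    >>> np.allclose(np.dot(pdirs, [0., 0., 1.]), 0)
    True
    >>> np.allclose(np.linalg.norm(pdirs, axis=1), 1)
    True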
"""
v = np.array(v, dtype=float)
# Float error used for floats comparison
er = np.finfo(v[0]).eps * 1e3
# Define circumference or semi-circumference
if half is True:
a = np.linspace(0., math.pi, num=num, endpoint=False)
else:
a = np.linspace(0., 2 * math.pi, num=num, endpoint=False)
cosa = np.cos(a)
sina = np.sin(a)
# Check if vector is not aligned to the x axis
if abs(v[0] - 1.) > er:
sq = np.sqrt(v[1]**2 + v[2]**2)
psamples = np.array([- sq*sina, (v[0]*v[1]*sina - v[2]*cosa) / sq,
(v[0]*v[2]*sina + v[1]*cosa) / sq])
else:
sq = np.sqrt(v[0]**2 + v[2]**2)
psamples = np.array([- (v[2]*cosa + v[0]*v[1]*sina) / sq, sina*sq,
(v[0]*cosa - v[2]*v[1]*sina) / sq])
return psamples.T
def dist_to_corner(affine):
"""Calculate the maximal distance from the center to a corner of a voxel,
given an affine
Parameters
----------
affine : 4 by 4 array.
The spatial transformation from the measurement to the scanner space.
Returns
-------
dist: float
The maximal distance to the corner of a voxel, given voxel size encoded
in the affine.
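    Examples
    --------
    A minimal sketch: for isotropic 2 mm voxels the farthest corner sits at
    half the voxel diagonal, i.e. sqrt(3 * 1**2).
    >>> import numpy as np
    >>> np.allclose(dist_to_corner(np.diag([2., 2., 2., 1.])), np.sqrt(3))
    True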
"""
R = affine[0:3, 0:3]
vox_dim = np.diag(np.linalg.cholesky(R.T.dot(R)))
return np.sqrt(np.sum((vox_dim / 2) ** 2))
def is_hemispherical(vecs):
"""Test whether all points on a unit sphere lie in the same hemisphere.
Parameters
----------
vecs : numpy.ndarray
2D numpy array with shape (N, 3) where N is the number of points.
All points must lie on the unit sphere.
Returns
-------
is_hemi : bool
If True, one can find a hemisphere that contains all the points.
If False, then the points do not lie in any hemisphere
pole : numpy.ndarray
If `is_hemi == True`, then pole is the "central" pole of the
input vectors. Otherwise, pole is the zero vector.
References
----------
https://rstudio-pubs-static.s3.amazonaws.com/27121_a22e51b47c544980bad594d5e0bb2d04.html # noqa
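    Examples
    --------
    A minimal doctest sketch: the three positive coordinate axes fit in the
    hemisphere centred on (1, 1, 1) / sqrt(3).
    >>> import numpy as np
    >>> vecs = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
    >>> is_hemi, pole = is_hemispherical(vecs)
    >>> bool(is_hemi)
    True
    >>> np.allclose(pole, 1 / np.sqrt(3))
    True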
"""
if vecs.shape[1] != 3:
raise ValueError("Input vectors must be 3D vectors")
if not np.allclose(1, np.linalg.norm(vecs, axis=1)):
raise ValueError("Input vectors must be unit vectors")
# Generate all pairwise cross products
v0, v1 = zip(*[p for p in itertools.permutations(vecs, 2)])
cross_prods = np.cross(v0, v1)
# Normalize them
cross_prods /= np.linalg.norm(cross_prods, axis=1)[:, np.newaxis]
# `cross_prods` now contains all candidate vertex points for "the polygon"
# in the reference. "The polygon" is a subset. Find which points belong to
# the polygon using a dot product test with each of the original vectors
angles = np.arccos(np.dot(cross_prods, vecs.transpose()))
# And test whether it is orthogonal or less
dot_prod_test = angles <= np.pi / 2.0
# If there is at least one point that is orthogonal or less to each
# input vector, then the points lie on some hemisphere
is_hemi = len(vecs) in np.sum(dot_prod_test.astype(int), axis=1)
if is_hemi:
vertices = cross_prods[
np.sum(dot_prod_test.astype(int), axis=1) == len(vecs)
]
pole = np.mean(vertices, axis=0)
pole /= np.linalg.norm(pole)
else:
pole = np.array([0.0, 0.0, 0.0])
return is_hemi, pole
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
from collections import namedtuple
import copy
import math
import os
import re
import warnings
import numpy as np
from ..common.constants import FLOAT_dtype
from ..common.functions import pyNormalizeAngle
from ..geometry import affineTransform
from .base import _union, loadImage, BaseReader
# utility functions
def getKITTIGroundTruth(labels, categories, categoriesOpt, mode = 'moderate'):
"""Get mandatory and optional ground truth out of a list of labels
    The KITTI dataset defines criteria for which labels have to be detected in order to avoid a false negative. There are three evaluation
    modes: 'easy', 'moderate' and 'hard'. To avoid penalizing good detection algorithms, labels which are hard to detect become optional
    ground truth, so detecting them will not result in a false positive. The harder the mode, the more labels become mandatory ground truth.
The category 'dontcare' is always included in optional ground truth.
:Parameters:
labels: list of dicts
List with labels provided by dataset reader
categories: list of strings
Mandatory categories
categoriesOpt: list of strings
Optional categories
mode: string, optional
Evaluation mode, 'moderate' by default
:Returns: mandatory and optional ground truth
:Returntype: tuple(groundTruth: list, groundTruthOpt: list)
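    Example (sketch; assumes ``labels`` was returned by one of the KITTI readers below)::

        groundTruth, groundTruthOpt = getKITTIGroundTruth(
            labels, ['car'], ['van', 'truck'], mode='easy')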
"""
# evaluation modes according to KITTI object evaluation rules
MODES = {
'easy': {'minHeight': 40, 'maxOcclusion': 0, 'maxTruncation': 0.15},
'moderate': {'minHeight': 25, 'maxOcclusion': 1, 'maxTruncation': 0.30},
'hard': {'minHeight': 25, 'maxOcclusion': 2, 'maxTruncation': 0.50},
}
mode = MODES[mode] # get mode dictionary
groundTruth = [] # ground truth (missing detection is a false negative)
groundTruthOpt = [] # optional ground truth (missing detection is ok)
for label in labels:
if label['category'] in categoriesOpt or label['category'] == 'dontcare':
groundTruthOpt.append(label)
elif label['category'] in categories:
if label['info']['occluded'] > mode['maxOcclusion']:
groundTruthOpt.append(label)
elif label['info']['truncated'] > mode['maxTruncation']:
groundTruthOpt.append(label)
elif label['box2D']['bottom'] - label['box2D']['top'] < mode['minHeight']:
groundTruthOpt.append(label)
else:
# label not optional
groundTruth.append(label)
return (groundTruth, groundTruthOpt)
def correctKITTILabelForStereo(label):
"""Roughly adjust for an empirically estimated labeling error w.r.t. stereo reconstruction in KITTI dataset"""
# TODO: check extensively
base = 15.0
scale = 1.07
new_label = copy.deepcopy(label)
if new_label['box3D']['location']['z'] > base:
new_label['box3D']['location']['z'] = base + (new_label['box3D']['location']['z']-base)*scale
return new_label
def writeLabels(labels, filepath, includeAlpha = True):
"""Write labels to file
The produced file can be used for evaluation on KITTI servers. Labels are expected to use the camera coordinate system.
:Parameters:
labels: list
List with object labels
filepath: string
Path to the file to use
includeAlpha: bool, optional
Write alpha values (observation angle) to file, *True* by default
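    Example (sketch; ``detections`` is a hypothetical list of labels in camera
    coordinates whose ``info`` dict also carries a ``weight`` score)::

        writeLabels(detections, '/tmp/results/000123.txt')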
"""
def label2line(label):
# convert lower case category to KITTI categories
category = label['category']
if category == 'dontcare':
category = 'DontCare'
else:
category = category[0].upper() + category[1:]
# compute alpha if required
if includeAlpha:
# set to object orientation
alpha = label['box3D']['rotation_y']
# adjust to X/Z observation angle of object center
alpha -= -math.atan2(label['box3D']['location']['z'], label['box3D']['location']['x']) - 1.5*math.pi
# wrap to +/-Pi
alpha = pyNormalizeAngle(alpha)
# convert to string
alpha = '%.2f' % alpha
else:
# set to KITTI default (invalid) value
alpha = '-10'
label_line = '%(category)s %(truncated).2f %(occluded)d %(alpha)s %(left).2f %(top).2f %(right).2f %(bottom).2f %(height).2f %(width).2f %(length).2f %(x).2f %(y).2f %(z).2f %(rotation_y).2f %(score).2f\n' % {
'category': category,
'truncated': label['info']['truncated'],
'occluded': -1, # invalid value to be ignored by KITTI evaluation
'alpha': alpha,
'left': label['box2D']['left'],
'top': label['box2D']['top'],
'right': label['box2D']['right'],
'bottom': label['box2D']['bottom'],
'height': label['box3D']['dimensions']['height'],
'width': label['box3D']['dimensions']['width'],
'length': label['box3D']['dimensions']['length'],
'x': label['box3D']['location']['x'],
'y': label['box3D']['location']['y'] + label['box3D']['dimensions']['height'] / 2.0,
'z': label['box3D']['location']['z'],
'rotation_y': label['box3D']['rotation_y'],
'score': label['info']['weight']*100, # multiply by 100 to avoid precision loss
}
return label_line
with open(filepath, mode='w') as f:
for label in labels:
f.write(label2line(label))
# named tuples definition
_NavigationInfo = namedtuple('_NavigationInfo', [
'lat', # latitude of the oxts-unit (deg)
'lon', # longitude of the oxts-unit (deg)
'alt', # altitude of the oxts-unit (m)
'roll', # roll angle (rad), 0 = level, positive = left side up (-pi..pi)
'pitch', # pitch angle (rad), 0 = level, positive = front down (-pi/2..pi/2)
'yaw', # heading (rad), 0 = east, positive = counter clockwise (-pi..pi)
'vn', # velocity towards north (m/s)
've', # velocity towards east (m/s)
'vf', # forward velocity, i.e. parallel to earth-surface (m/s)
'vl', # leftward velocity, i.e. parallel to earth-surface (m/s)
'vu', # upward velocity, i.e. perpendicular to earth-surface (m/s)
'ax', # acceleration in x, i.e. in direction of vehicle front (m/s^2)
'ay', # acceleration in y, i.e. in direction of vehicle left (m/s^2)
'az', # acceleration in z, i.e. in direction of vehicle top (m/s^2)
'af', # forward acceleration (m/s^2)
'al', # leftward acceleration (m/s^2)
'au', # upward acceleration (m/s^2)
'wx', # angular rate around x (rad/s)
'wy', # angular rate around y (rad/s)
'wz', # angular rate around z (rad/s)
'wf', # angular rate around forward axis (rad/s)
'wl', # angular rate around leftward axis (rad/s)
'wu', # angular rate around upward axis (rad/s)
    'posacc', # position accuracy (north/east in m)
'velacc', # velocity accuracy (north/east in m/s)
'navstat', # navigation status
'numsats', # number of satellites tracked by primary GPS receiver
'posmode', # position mode of primary GPS receiver
'velmode', # velocity mode of primary GPS receiver
'orimode', # orientation mode of primary GPS receiver
])
# dataset readers implementation
class KITTIReader(BaseReader):
"""Abstract data extractor for KITTI_ datasets
This class relies on presence of at least one image for every frame to detect available frames. Lidar data is optional.
See :class:`~.base.BaseReader` for more information.
.. _KITTI: http://www.cvlibs.net/datasets/kitti/
"""
def getDatasets(self):
raise NotImplementedError("getDatasets() is not implemented in KITTIReader")
def getFrameIds(self, dataset = None):
def _filesToFrames(filenames):
def _getFrameId(filename):
                match = re.match(r"(\d{6})\.png", filename)
if match:
return int(match.groups()[0])
else:
return None
return [frameId for frameId in map(_getFrameId, filenames) if frameId is not None]
image_lists = [os.listdir(image_dir) for image_dir in self._getImageDirs(dataset) if os.path.isdir(image_dir)]
return sorted(list(_union(*map(_filesToFrames, image_lists))))
def getFrameInfo(self, frameId, dataset = None):
"""Get information about a frame in the specified dataset
:Parameters:
frameId: int
Frame ID of the requested frame
dataset: str or None, optional
Dataset with the requested frame, only required by :class:`KITTITrackletsReader`
:Returns: Dictionary with information about the frame:
'dataset': str or None
Dataset of the frame, *None* for :class:`KITTIObjectsReader`
'frameId': int
Frame ID
'img_left': NumPy array
Left image, can be None
'img_right': NumPy array
Right image, can be None
'lidar': dict{'XYZ': NumPy array, 'RGB': NumPy array}
XYZ (transformed to camera coordinates, rectification matrix applied) and RGB (reflectance, gray) data from lidar sensor, can be None
'calibration': dict
Calibration matrices
'reprojection': np.matrix[FLOAT_dtype]
3D reconstruction out of disparity, shape: (4, 4)
'projection_left': np.matrix[FLOAT_dtype]
Projection of camera coordinates to left camera image, shape: (3, 4)
'projection_right': np.matrix[FLOAT_dtype]
Projection of camera coordinates to right camera image, shape: (3, 4)
'rect': np.matrix[FLOAT_dtype]
Rectification matrix, shape: (4, 4)
'lidar2cam': np.matrix[FLOAT_dtype]
Transformation from lidar to camera coordinates, shape: (4, 4)
'labels': list of dictionaries
List with labels in this frame. Each label contains the following keys:
'category': string
Possible values: 'car', 'van', 'truck', 'pedestrian', 'person_sitting', 'cyclist', 'tram', 'misc', 'dontcare'
'box2D': dict
Bounding box in the left image, keys: *'left'*, *'top'*, *'right'*, *'bottom'*
'box3D': dict
'location': dict
Center of the 3D box, keys: *'x'*, *'y'*, *'z'*
'dimensions': dict
Size of the 3D box, keys: *'height'*, *'width'*, *'length'*
'rotation_y': float
Object rotation around Y-axis in camera coordinates [-pi...pi], 0 = facing along X-axis
'info': dict
'truncated': float
Float from 0 (non-truncated) to 1 (truncated), where *truncated* refers to the object leaving image boundaries
'occluded': int
Occlusion status (0 = fully visible, 1 = partly occluded, 2 = largely occluded, 3 = unknown)
'trackId': int, optional
Unique tracking id of this object within this sequence, supplied only by `KITTITrackletsReader`
"""
img_dir_left, img_dir_right = self._getImageDirs(dataset)
img_file_left = os.path.join(img_dir_left, "%06d.png" % frameId)
img_file_right = os.path.join(img_dir_right, "%06d.png" % frameId)
calibration = self._getCamCalibration(frameId, dataset)
return {
'dataset': dataset,
'frameId': frameId,
'img_left': loadImage(img_file_left) if os.path.isfile(img_file_left) else None,
'img_right': loadImage(img_file_right) if os.path.isfile(img_file_right) else None,
'calibration': calibration,
'lidar': self._getLidarPoints(calibration, frameId, dataset),
'labels': self._getFrameLabels(frameId, dataset),
}
# -- directory functions ---
def _getImageDirs(self, dataset = None):
raise NotImplementedError("_getImageDirs() is not implemented in KITTIReader")
def _getLidarDir(self, dataset = None):
raise NotImplementedError("_getLidarDir() is not implemented in KITTIReader")
def _getCalibrationDir(self):
return os.path.join(self._dir, "calib")
def _getLabelsDir(self):
raise NotImplementedError("_getLabelsDir() is not implemented in KITTIReader")
# --- internal functions ---
def _getLabelData(self, values):
# function expects the first value in values list being the category
labelData = {
# see KITTI's devkit/readme.txt
'type': values[0], # category of object: 'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare'
'truncated': float(values[1]), # float from 0 (non-truncated) to 1 (truncated), where truncated refers to the object leaving image boundaries
'occluded': int(values[2]), # integer (0,1,2,3) indicating occlusion state: 0 = fully visible, 1 = partly occluded, 2 = largely occluded, 3 = unknown
'alpha': float(values[3]), # observation angle of object, ranging [-pi..pi]
'bbox': { # 2D bounding box of object in the image
'left': float(values[4]),
'top': float(values[5]),
'right': float(values[6]),
'bottom': float(values[7]),
},
'dimensions': { # 3D object dimensions
'height': float(values[8]),
'width': float(values[9]),
'length': float(values[10]),
},
'location': { # location of front-center-bottom point of the 3D bounding box
'x': float(values[11]),
'y': float(values[12]),
'z': float(values[13]),
},
'rotation_y': float(values[14]), # rotation ry around Y-axis in camera coordinates [-pi..pi], 0 = facing along X-axis
}
return labelData
def _processLabel(self, kitti_label):
"""Transform KITTI label to universal format
See :class:`~.base.BaseReader` for label format description.
"""
label = {
'category': kitti_label['type'].lower(),
'box2D': kitti_label['bbox'].copy(),
'box3D': {
'location': {
'x': kitti_label['location']['x'],
'y': kitti_label['location']['y'] - kitti_label['dimensions']['height'] / 2.0, # move to center
'z': kitti_label['location']['z'],
},
'dimensions': kitti_label['dimensions'].copy(),
'rotation_y': kitti_label['rotation_y'],
},
'info': {
'truncated': kitti_label['truncated'],
'occluded': kitti_label['occluded'],
},
}
if 'trackId' in kitti_label:
# set trackId if given
label['info']['trackId'] = kitti_label['trackId']
return label
def _getFrameLabels(self, frameId, dataset = None):
raise NotImplementedError("_getFrameLabels() is not implemented in KITTIReader")
def _getCamCalibration(self, frameId, dataset = None):
raise NotImplementedError("_getCamCalibration() is not implemented in KITTIReader")
def _readCamCalibration(self, filename):
def line2values(line):
return [float(v) for v in line.strip().split(" ")[1:]]
def getMatrix(values, shape):
return np.matrix(values, dtype = FLOAT_dtype).reshape(shape)
def padMatrix(matrix_raw):
matrix = np.matrix(np.zeros((4,4), dtype = FLOAT_dtype), copy = False)
matrix[:matrix_raw.shape[0], :matrix_raw.shape[1]] = matrix_raw
matrix[3, 3] = 1
return matrix
with open(filename, 'r') as f:
data = f.read().split("\n")
#P0 = getMatrix(line2values(data[0]), (3, 4))
#P1 = getMatrix(line2values(data[1]), (3, 4))
P2 = getMatrix(line2values(data[2]), (3, 4))
P3 = getMatrix(line2values(data[3]), (3, 4))
Rect = padMatrix(getMatrix(line2values(data[4]), (3, 3)))
Velo2Cam = padMatrix(getMatrix(line2values(data[5]), (3, 4)))
#Imu2Velo = padMatrix(getMatrix(line2values(data[6]), (3, 4)))
P_left = P2
P_right = P3
# see for example http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#stereorectify
f = P_left[0, 0]
Tx = (P_right[0, 3] - P_left[0, 3]) / f
cx_left = P_left[0, 2]
cx_right = P_right[0, 2]
cy = P_left[1, 2]
# Depth = f*(T/disparity), see for example http://www.gergltd.com/cse486/project4/
# see http://stackoverflow.com/questions/11406849/using-opencv-to-generate-3d-points-assuming-frontal-parallel-configuration
reprojection = np.matrix([
[1, 0, 0, -cx_left],
[0, 1, 0, -cy],
[0, 0, 0, f],
[0, 0, -1/Tx, (cx_left - cx_right) / Tx],
], dtype = FLOAT_dtype)
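        # Usage sketch (not part of this reader): a pixel (u, v) with disparity d
        # maps to homogeneous camera coordinates via
        #   [X, Y, Z, W]^T = reprojection * [u, v, d, 1]^T,   xyz = (X/W, Y/W, Z/W)
        # i.e. this matrix plays the role of the Q matrix returned by OpenCV's
        # stereoRectify and consumed by reprojectImageTo3D.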
result = {
'projection_left': P_left,
'projection_right': P_right,
'rect': Rect,
'velo2cam': Velo2Cam,
'reprojection': reprojection,
}
return result
def _getLidarPoints(self, calibration, frameId, dataset = None):
filename = os.path.join(self._getLidarDir(dataset), '%06d.bin' % frameId)
if not os.path.isfile(filename):
# no data
return None
# read matrix
data = np.fromfile(filename, np.float32)
# reshape to points with 4 coordinates each
data = data.reshape(data.shape[0] // 4, 4)
# XYZ coordinates transformed to camera coordinates
XYZ = affineTransform(data[:, :3], calibration['rect']*calibration['velo2cam'])
# reflectance
R = (256 * data[:, 3]).astype(np.uint8)
# convert reflectance to RGB
RGB = np.ndarray((XYZ.shape[0], 3), dtype = np.uint8)
RGB[:, 0] = R
RGB[:, 1] = R
RGB[:, 2] = R
return {'XYZ': XYZ, 'RGB': RGB}
class KITTIObjectsReader(KITTIReader):
"""Data extractor for KITTI objects dataset
The data directory must contain the directories *'calib'*, *'image_2'* and/or *'image_3'* and optionally *'label_2'*.
See :class:`KITTIReader` for more information.
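    Example (sketch; the path is hypothetical)::

        reader = KITTIObjectsReader('/data/kitti/object/training')
        info = reader.getFrameInfo(reader.getFrameIds()[0])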
"""
def getDatasets(self):
return ['None'] # return the one and only dataset
# -- directory functions ---
def _getImageDirs(self, dataset = None):
# we have no datasets, ignore the dataset parameter
return (os.path.join(self._dir, "image_2"), os.path.join(self._dir, "image_3"))
def _getLidarDir(self, dataset = None):
# we have no datasets, ignore the dataset parameter
return os.path.join(self._dir, "velodyne")
def _getLabelsDir(self):
dir = os.path.join(self._dir, "label_2")
if os.path.exists(dir):
return dir
else:
# no labels given
return None
# --- internal functions ---
def _getFrameLabels(self, frameId, dataset = None):
if self._getLabelsDir() is None:
# no labels given
return []
else:
with open(os.path.join(self._getLabelsDir(), "%06d.txt" % frameId), 'r') as f:
text_data = [[value for value in line.split(" ")] for line in f.read().split("\n") if line]
labels = []
for line in text_data:
# get label data (starting with the category)
labelData = self._getLabelData(line)
# transform to universal label format and append to labels
labels.append(self._processLabel(labelData))
return labels
def _getCamCalibration(self, frameId, dataset = None):
return self._readCamCalibration(os.path.join(self._getCalibrationDir(), "%06d.txt" % frameId))
class KITTITrackletsReader(KITTIReader):
"""Data extractor for KITTI tracklets dataset
The data directory must contain the directories *'calib'*, *'image_02'* and/or *'image_03'*.
Optional directories are *'label_02'* and *'oxts'*.
See :class:`KITTIReader` for more information.
"""
def __init__(self, directory):
super(KITTITrackletsReader, self).__init__(directory)
# initialize cache
self._cache = {
'calibration': {},
'labels': {},
'oxts': {'files': {}, 'data': {}},
}
def getDatasets(self):
# use a dummy dataset, we only need the directory above it
return sorted(list(_union(*[os.listdir(os.path.dirname(image_dir)) for image_dir in self._getImageDirs('0')])))
def getFrameInfo(self, frameId, dataset):
"""See :func:`~pydriver.datasets.base.BaseReader.getFrameInfo` for general description
If navigation data is available (in the *'oxts'* directory), the result will contain an additional key:
'navigation': OrderedDict
'lat': float
latitude of the oxts-unit (deg)
'lon': float
longitude of the oxts-unit (deg)
'alt': float
altitude of the oxts-unit (m)
'roll': float
roll angle (rad), 0 = level, positive = left side up (-pi..pi)
'pitch': float
pitch angle (rad), 0 = level, positive = front down (-pi/2..pi/2)
'yaw': float
heading (rad), 0 = east, positive = counter clockwise (-pi..pi)
'vn': float
velocity towards north (m/s)
've': float
velocity towards east (m/s)
'vf': float
forward velocity, i.e. parallel to earth-surface (m/s)
'vl': float
leftward velocity, i.e. parallel to earth-surface (m/s)
'vu': float
upward velocity, i.e. perpendicular to earth-surface (m/s)
'ax': float
acceleration in x, i.e. in direction of vehicle front (m/s^2)
'ay': float
acceleration in y, i.e. in direction of vehicle left (m/s^2)
'az': float
acceleration in z, i.e. in direction of vehicle top (m/s^2)
'af': float
forward acceleration (m/s^2)
'al': float
leftward acceleration (m/s^2)
'au': float
upward acceleration (m/s^2)
'wx': float
angular rate around x (rad/s)
'wy': float
angular rate around y (rad/s)
'wz': float
angular rate around z (rad/s)
'wf': float
angular rate around forward axis (rad/s)
'wl': float
angular rate around leftward axis (rad/s)
'wu': float
angular rate around upward axis (rad/s)
'posacc': float
                position accuracy (north/east in m)
'velacc': float
velocity accuracy (north/east in m/s)
'navstat': int
navigation status
'numsats': int
number of satellites tracked by primary GPS receiver
'posmode': int
position mode of primary GPS receiver
'velmode': int
velocity mode of primary GPS receiver
'orimode': int
orientation mode of primary GPS receiver
"""
info = super(KITTITrackletsReader, self).getFrameInfo(frameId, dataset)
# add navigation information if available
oxts = self._getOxtsInfo(frameId, dataset)
if oxts is not None:
info['navigation'] = oxts
return info
# -- directory functions ---
def _getImageDirs(self, dataset):
return (os.path.join(self._dir, "image_02", dataset), os.path.join(self._dir, "image_03", dataset))
def _getLidarDir(self, dataset = None):
return os.path.join(self._dir, "velodyne", dataset)
def _getLabelsDir(self):
dir = os.path.join(self._dir, "label_02")
if os.path.exists(dir):
return dir
else:
# no labels given
return None
def _getOxtsDir(self):
oxtsDir = os.path.join(self._dir, "oxts")
if os.path.exists(oxtsDir):
return oxtsDir
else:
# no oxts data given
warnings.warn(UserWarning('"{}" directory not found, navigation data not available.'.format(oxtsDir)))
return None
def _getOxtsFile(self, dataset):
oxtsDir = self._getOxtsDir()
if oxtsDir is None:
return None
oxtsFile = os.path.join(oxtsDir, '{}.txt'.format(dataset))
if os.path.exists(oxtsFile):
return oxtsFile
else:
# no oxts data given
warnings.warn(UserWarning('"{}" file not found, navigation data for dataset {} not available.'.format(oxtsFile, dataset)))
return None
# --- internal functions ---
def _getFrameLabels(self, frameId, dataset):
if self._getLabelsDir() is None:
# no labels given
return []
else:
return self._getDatasetLabels(dataset).get(frameId, [])
def _getDatasetLabels(self, dataset):
if dataset not in self._cache['labels']:
with open(os.path.join(self._getLabelsDir(), "%s.txt" % dataset), 'r') as f:
text_data = [[value for value in line.split(" ")] for line in f.read().split("\n") if line]
labels = {}
for line in text_data:
frameId = int(line[0])
if frameId not in labels:
labels[frameId] = []
# get label data (starting with the category)
labelData = self._getLabelData(line[2:])
# unique tracking id of this object within this sequence (specific to tracklets dataset)
labelData['trackId'] = int(line[1])
# transform to universal label format
label = self._processLabel(labelData)
labels[frameId].append(label)
self._cache['labels'][dataset] = labels
return self._cache['labels'][dataset]
def _getCamCalibration(self, frameId, dataset):
if dataset not in self._cache['calibration']:
self._cache['calibration'][dataset] = self._readCamCalibration(os.path.join(self._getCalibrationDir(), "%s.txt" % dataset))
return self._cache['calibration'][dataset]
def _getOxtsInfo(self, frameId, dataset):
"""Extract oxts navigation data"""
if dataset not in self._cache['oxts']['files']:
# dataset file not in cache
oxtsFile = self._getOxtsFile(dataset)
if oxtsFile is None:
# no oxts file
self._cache['oxts']['files'][dataset] = None
else:
# read file lines
with open(oxtsFile, 'r') as f:
lines = [line for line in f.read().strip().split('\n') if line]
self._cache['oxts']['files'][dataset] = lines
# initialize dataset dictionary, key: frameId, value: _NavigationInfo-like OrderedDict or None
self._cache['oxts']['data'][dataset] = {}
# assertion: self._cache['oxts']['files'][dataset] exists (list of strings or None)
# assertion: self._cache['oxts']['data'][dataset] dict exists (can be empty)
if frameId not in self._cache['oxts']['data'][dataset]:
# get text file lines for this dataset
lines = self._cache['oxts']['files'][dataset]
if lines is None:
# no information available
self._cache['oxts']['data'][dataset][frameId] = None
else:
if frameId >= len(lines):
raise ValueError('Navigation information for frame {} in dataset {} not found.'.format(frameId, dataset))
# list with values (as strings) in text file line for this frame
values_str = lines[frameId].strip().split(' ')
# process and cache frame data
values_float = [float(v) for v in values_str[:25]]
values_int = [int(v) for v in values_str[25:]]
self._cache['oxts']['data'][dataset][frameId] = _NavigationInfo(*tuple(values_float + values_int))._asdict()
# assertion: self._cache['oxts']['data'][dataset][frameId] exists (_NavigationInfo-like OrderedDict or None)
return self._cache['oxts']['data'][dataset][frameId]
|
|
#!/usr/bin/python
import sys
import zmq
import random
import time
import os, pwd
import datetime
import json
import getopt
import socket
import happybase
# TTransportException comes from the Apache Thrift bindings used by happybase
from thrift.transport.TTransport import TTransportException
import hashlib
import struct
import traceback
import re
import threading
# adjust to match your $PREFIX if you specified one
# default PREFIX = /usr/local
sys.path.append('/usr/local/lib/cif-protocol/pb-python/gen-py')
import msg_pb2
import feed_pb2
import control_pb2
import RFC5070_IODEF_v1_pb2
import MAEC_v2_pb2
import cifsupport
sys.path.append('../../libcif/lib')
from CIF.CtrlCommands.Clients import *
from CIF.CtrlCommands.Ping import *
from CIF.Foundation import Foundation
from DB.APIKeys import *
from DB.Exploder import Exploder
from DB.Registry import Registry
from DB.Query import Query
from DB.Purger import Purger
from DB.Salt import Salt
from DB.PrimaryIndex import PrimaryIndex
from DB.SecondaryIndex import SecondaryIndex
from DB.Log import Log
from CIF.CtrlCommands.ThreadTracker import ThreadTracker
print "cif-db proof of concept"
"""
Two threads:
Attach to cif-router PUB:
Subscribe to all message types
Write all messages we receive to HBase
Attach to cif-router ROUTER:
When we receive a query request:
retrieve the requested information
send it back to the requester
"""
def usage():
print "\
# poc-db [-c 5656] [-r cif-router:5555] [-H hbase host] [-m name]\n\
# -c control port (REQ - for inbound messages)\n\
# -r cif-router hostname:port\n\
# -m my name\n"
def HBConnection(hbhost):
pool = happybase.ConnectionPool(size=25, host=hbhost)
with pool.connection() as connection:
t = connection.tables()
print "found tables: ", t
if not "cif_idl" in t:
raise Exception("missing cif_idl table")
if not "cif_objs" in t:
raise Exception("missing cif_objs table")
return pool
"""
Given a msg object, we want to record its IDL (for posterity)
to cif_idl if it hasn't been already. We then write the actual object
to cif_objs.
# rowkey $salt$timestamp$hash (eg "<2 byte salt><4 byte timestamp><16 byte md5>", matching struct.pack(">HI16s", ...) below)
# cf:$submsgtype (eg cf:RFC5070-IODEF-v1=object)
"""
def saveIDL(cif_idl, sr):
#bot = sr.baseObjectType;
bot = re.sub('_', '-', sr.baseObjectType)
fn = cifsupport.installBase() + "/" + bot + ".proto"
#print "IDL should be: " + fn
def writeToDb(cif_objs, cif_idl, sr, salt):
#print "\tWrite message(s) to db: " + str(sr.baseObjectType)
ts = int(time.time()) # ignore fractional seconds
md5 = hashlib.md5()
md5.update(sr.SerializeToString())
hash = md5.digest()
colspec = "cf:" + str(sr.baseObjectType)
try:
saveIDL(cif_idl, sr)
rowid = struct.pack(">HI16s", salt, ts, hash)
cif_objs.put(rowid, {colspec: sr.data})
#print "\tput: rowid:" + rowid.encode('hex') + " " + colspec + " "
except struct.error, err:
print "Failed to pack rowid: ", err
def apikey_row_to_akr(row):
akr = control_pb2.APIKeyResponse()
akr.alias = row['alias']
akr.revoked = row['revoked']
akr.expires = row['expires']
akr.restrictedAccess = row['restrictedAccess']
akr.writeAccess = row['writeAccess']
akr.description = row['description']
akr.created = row['created']
akr.parent = row['parent']
akgl = []
for group in row['groups']:
akg = control_pb2.APIKeyGroup()
akg.groupname = row['groups'][group]
akg.groupid = group
if akg.groupid == row['defaultGroup']:
akg.default = True
else:
akg.default = False
akgl.append(akg)
akr.groupsList.extend(akgl)
return akr
def controlMessageHandler(msg, params):
if debug > 0:
print "controlMessageHandler: Got a control message: "#, msg
connectionPool = None
if params != None:
if 'connectionPool' in params:
connectionPool = params['connectionPool']
if msg.type == control_pb2.ControlType.COMMAND:
thread_tracker.add(id=threading.current_thread().ident, user=pwd.getpwuid(os.getuid())[0], host=socket.gethostname(), state='Running', info="controlMessageHandler",
command=control_pb2._CONTROLTYPE_COMMANDTYPE.values_by_number[msg.command].name)
if msg.command == control_pb2.ControlType.PING:
c = Ping.makereply(msg)
cf.sendmsg(c, None)
elif msg.command == control_pb2.ControlType.APIKEY_GET:
print "controlMessageHandler: APIKEY_GET ", msg.apiKeyRequest.apikey
k = apikeys.get_by_key(msg.apiKeyRequest.apikey)
msg.type = control_pb2.ControlType.REPLY
if k == {}:
print "APIKEY_GET Key lookup failed."
msg.status = control_pb2.ControlType.FAILED
else:
print "APIKEY_GET Key lookup succeeded."
msg.status = control_pb2.ControlType.SUCCESS
akr = apikey_row_to_akr(k)
akr.apikey = msg.apiKeyRequest.apikey
msg.apiKeyResponseList.extend([akr])
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
print "controlMessageHandler: APIKEY_GET sending reply.."
cf.sendmsg(msg, None)
elif msg.command == control_pb2.ControlType.APIKEY_LIST:
print "controlMessageHandler: APIKEY_LIST ", msg.apiKeyRequest.apikey
ks = apikeys.list_by_key(msg.apiKeyRequest.apikey)
akr_list = []
for kkey in ks:
kval = ks[kkey]
akr = apikey_row_to_akr(kval)
akr.apikey = kkey
akr_list.append(akr)
msg.apiKeyResponseList.extend(akr_list)
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.type = control_pb2.ControlType.REPLY
msg.status = control_pb2.ControlType.SUCCESS
cf.sendmsg(msg, None)
elif msg.command == control_pb2.ControlType.APIKEY_ADD:
print "controlMessageHandler: APIKEY_ADD ", msg.apiKeyRequest.apikey
msg.type = control_pb2.ControlType.REPLY
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
try:
apikeys.add_key(msg.apiKeyRequest)
msg.status = control_pb2.ControlType.SUCCESS
except Exception as e:
print "FAILED with " + str(e)
msg.statusMsg = str(e)
msg.status = control_pb2.ControlType.FAILED
cf.sendmsg(msg, None)
elif msg.command == control_pb2.ControlType.APIKEY_UPDATE:
print "controlMessageHandler: APIKEY_UPDATE ", msg.apiKeyRequest.apikey
msg.type = control_pb2.ControlType.REPLY
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
try:
apikeys.update_key(msg.apiKeyRequest)
msg.status = control_pb2.ControlType.SUCCESS
except Exception as e:
msg.status = control_pb2.ControlType.FAILED
print "FAILED with " + str(e)
msg.statusMsg = str(e)
cf.sendmsg(msg, None)
elif msg.command == control_pb2.ControlType.APIKEY_DEL:
print "controlMessageHandler: APIKEY_DEL ", msg.apiKeyRequest.apikey
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.FAILED
try:
if msg.apiKeyRequest.apikey == '' and msg.apiKeyRequest.alias != '':
apikeys.remove_by_alias(msg.apiKeyRequest.alias)
else:
apikeys.remove_by_key(msg.apiKeyRequest.apikey)
msg.status = control_pb2.ControlType.SUCCESS
except Exception as e:
msg.statusMsg = str(e)
cf.sendmsg(msg, None)
elif msg.command == control_pb2.ControlType.THREADS_LIST:
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
thread_tracker.asmessage(msg.listThreadsResponse)
cf.sendmsg(msg, None)
elif msg.command == control_pb2.ControlType.CIF_QUERY_REQUEST:
qrs = []
tmp = msg.dst
msg.dst = msg.src
msg.src = tmp
msg.status = control_pb2.ControlType.SUCCESS
for i in range(0, len(msg.queryRequestList.query)):
qe = Query(connectionPool, primary_index, secondary_index, True) # TODO move this line outside of this routine
qe.setqr(msg.queryRequestList.query[i])
qe.setlimit(msg.queryRequestList.limit)
try:
qresponse = qe.execqr()
qrs.append(qresponse)
except Exception as e:
msg.status = control_pb2.ControlType.FAILED
msg.statusMsg = str(e)
msg.queryResponseList.extend(qrs)
cf.sendmsg(msg, None)
thread_tracker.remove(threading.current_thread().ident)
try:
opts, args = getopt.getopt(sys.argv[1:], 'c:r:m:D:H:h')
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
controlport = "5656"
cifrouter = "sdev.nickelsoft.com:5555"
myid = "cif-db"
apikey = "a8fd97c3-9f8b-477b-b45b-ba06719a0088"
debug = 0
global hbhost
hbhost = "localhost"
for o, a in opts:
if o == "-c":
controlport = a
elif o == "-m":
myid = a
elif o == "-r":
cifrouter = a
elif o == "-h":
usage()
sys.exit(2)
elif o == "-H":
hbhost = a
elif o == "-D":
debug = a
myip = "127.0.0.1"
try:
myip = socket.gethostbyname(socket.gethostname()) # has caveats
except Exception as e:
print "can't determine myip based on my hostname: ", socket.gethostname()
global cf
global exploder
global primary_index
global secondary_index
global thread_tracker
try:
print "Connect to HBase"
connectionPool = HBConnection(hbhost)
with connectionPool.connection() as connection:
cif_objs = connection.table('cif_objs').batch(batch_size=5) # set very low for development, set to 1000+ for test/qa/prod
cif_idl = connection.table('cif_idl')
print "Init Registry"
registry = Registry(connectionPool, debug)
num_servers = registry.get('hadoop.num_servers')
if num_servers == None:
num_servers = 1
print "hadoop.num_servers not set. defaulting."
print "hadoop.num_servers = ", num_servers
salt = Salt(num_servers, debug)
thread_tracker = ThreadTracker(debug)
global apikeys
log = Log(connectionPool)
log.L("cif-db initializing")
print "Initializing APIKeys object"
apikeys = APIKeys(connection, True)
print "Resolving our APIKey: " + myid
apikey = apikeys.get_by_alias(myid)
print "Initializing foundation"
cf = Foundation({'apikey' : apikey,
'myip' : myip,
'cifrouter' : cifrouter,
'controlport' : controlport,
'myid' : myid,
'routerid' : "cif-router",
'thread_tracker' : thread_tracker
})
primary_index = PrimaryIndex(connectionPool, debug)
secondary_index = SecondaryIndex(connectionPool, debug)
print "Configuring foundation"
cf.setdebug(debug)
cf.setdefaultcallback(controlMessageHandler, {'connectionPool': connectionPool})
print "Register with " + cifrouter + " (req->rep)"
req = cf.ctrlsocket()
# apikey, req, myip, myid, cifrouter
(routerport, routerpubport) = cf.register()
subscriber = cf.subscribersocket()
time.sleep(1) # wait for router to connect, sort of lame but see this a lot in zmq code
print "Initializing Exploder"
exploder = Exploder.Exploder(connectionPool, thread_tracker, False)
print "Initializing Purger"
purger = Purger.Purger(connectionPool, num_servers, thread_tracker, True)
while True:
msg = msg_pb2.MessageType()
msg.ParseFromString(subscriber.recv())
if apikeys.is_valid(msg.apikey):
if msg.type == msg_pb2.MessageType.SUBMISSION and len(msg.submissionRequest) > 0:
#print "Got a SUBMISSION. Saving."
for i in range(0, len(msg.submissionRequest)):
writeToDb(cif_objs, cif_idl, msg.submissionRequest[i], salt.next())
# ignore QUERY logic at present, see controlmessagehandler, above, instead
# we arent processing QUERYs recvd via this PUB/SUB connection
elif msg.type == msg_pb2.MessageType.QUERY and len(msg.queryRequest) > 0:
print "Got an unexected QUERY on PUB/SUB interface"
else:
print "Wrong or empty message recvd on subscriber port. Expected submission or query (" + \
str(msg_pb2.MessageType.SUBMISSION) + " or " + \
str(msg_pb2.MessageType.QUERY) + ") got " + \
str(msg.type) + " number of parts (should be > 0) SR:" + \
str(len(msg.submissionRequest)) + " / QR:" + str(len(msg.queryRequest))
else:
print "message has an invalid apikey"
cf.unregister()
except KeyboardInterrupt:
print "\n\nShutting down.\n\n"
if cif_objs != None:
cif_objs.send() # flush
if cf != None:
cf.ctrlc()
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
except KeyError as e:
print "PB KeyError: ", e
traceback.print_exc(file=sys.stdout)
except TTransportException as e:
    print "Can't connect to HBase"
except Exception as inst:
    print "Unexpected error: ", sys.exc_info()[0], " ", sys.exc_info()[1], " "
    traceback.print_tb(sys.exc_info()[2])
|
|
import logging
log = logging.getLogger(__name__)
import itertools
import importlib
from functools import partial
from collections import defaultdict
import numpy as np
import pandas as pd
import pyqtgraph as pg
from atom.api import (Unicode, Float, Tuple, Int, Typed, Property, Atom, Bool,
Enum, List, Dict, Callable, Value)
from enaml.application import deferred_call, timed_call
from enaml.colors import parse_color
from enaml.core.api import Looper, Declarative, d_, d_func
from enaml.qt.QtGui import QColor
from psi.util import SignalBuffer, ConfigurationException
from psi.core.enaml.api import load_manifests, PSIContribution
from psi.controller.calibration import util
from psi.context.context_item import ContextMeta
################################################################################
# Utility functions
################################################################################
def get_x_fft(fs, duration):
n_time = int(fs * duration)
freq = np.fft.rfftfreq(n_time, fs**-1)
return np.log10(freq)
def get_color_cycle(name):
module_name, cmap_name = name.rsplit('.', 1)
module = importlib.import_module(module_name)
cmap = getattr(module, cmap_name)
return itertools.cycle(cmap.colors)
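# Example (sketch): resolving a colormap by its dotted module path. Any importable
# attribute exposing a ``.colors`` sequence works; ``palettable`` is an assumed,
# optional dependency here.
#
#     cycle = get_color_cycle('palettable.colorbrewer.qualitative.Set1_9')
#     pen_color = make_color(next(cycle))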
def make_color(color):
if isinstance(color, tuple):
return QColor(*color)
elif isinstance(color, str):
return QColor(color)
else:
        raise ValueError('Unknown color %r' % color)
################################################################################
# Style mixins
################################################################################
class ColorCycleMixin(Declarative):
#: Define the pen color cycle. Can be a list of colors or a string
#: indicating the color palette to use in palettable.
pen_color_cycle = d_(Typed(object))
_plot_colors = Typed(dict)
def _make_plot_cycle(self):
if isinstance(self.pen_color_cycle, str):
cycle = get_color_cycle(self.pen_color_cycle)
else:
cycle = itertools.cycle(self.pen_color_cycle)
return defaultdict(lambda: next(cycle))
@d_func
def get_pen_color(self, key):
if self._plot_colors is None:
self._plot_colors = self._make_plot_cycle()
color = self._plot_colors[key]
if not isinstance(color, str):
return QColor(*color)
else:
return QColor(color)
def _observe_pen_color_cycle(self, event):
self._plot_colors = self._make_plot_cycle()
self.reset_plots()
def reset_plots(self):
raise NotImplementedError
################################################################################
# Supporting classes
################################################################################
class BaseDataRange(Atom):
container = Typed(object)
# Size of display window
span = Float(1)
# Delay before clearing window once data has "scrolled off" the window.
delay = Float(0)
# Current visible data range
current_range = Tuple(Float(), Float())
def add_source(self, source):
cb = partial(self.source_added, source=source)
source.add_callback(cb)
def _default_current_range(self):
return 0, self.span
def _observe_delay(self, event):
self._update_range()
def _observe_span(self, event):
self._update_range()
def _update_range(self):
raise NotImplementedError
class EpochDataRange(BaseDataRange):
max_duration = Float()
def source_added(self, data, source):
n = [len(d['signal']) for d in data]
max_duration = max(n) / source.fs
self.max_duration = max(max_duration, self.max_duration)
def _observe_max_duration(self, event):
self._update_range()
def _update_range(self):
self.current_range = 0, self.max_duration
class ChannelDataRange(BaseDataRange):
# Automatically updated. Indicates last "seen" time based on all data
# sources reporting to this range.
current_time = Float(0)
current_samples = Typed(defaultdict, (int,))
current_times = Typed(defaultdict, (float,))
def _observe_current_time(self, event):
self._update_range()
def _update_range(self):
low_value = (self.current_time//self.span)*self.span - self.delay
high_value = low_value+self.span
self.current_range = low_value, high_value
def add_event_source(self, source):
cb = partial(self.event_source_added, source=source)
source.add_callback(cb)
def source_added(self, data, source):
self.current_samples[source] += data.shape[-1]
self.current_times[source] = self.current_samples[source]/source.fs
self.current_time = max(self.current_times.values())
def event_source_added(self, data, source):
self.current_times[source] = data[-1][1]
self.current_time = max(self.current_times.values())
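# Worked example (illustrative): ChannelDataRange snaps the visible window to
# multiples of `span` and shifts it back by `delay` seconds so the tail of the
# previous window remains briefly visible. With span=1.0, delay=0.25 and
# current_time=2.3:
#
#     low  = (2.3 // 1.0) * 1.0 - 0.25 = 1.75
#     high = low + 1.0                 = 2.75
#
# i.e. current_range becomes (1.75, 2.75).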
def create_container(children, x_axis=None):
container = pg.GraphicsLayout()
container.setSpacing(10)
# Add the x and y axes to the layout, along with the viewbox.
for i, child in enumerate(children):
container.addItem(child.y_axis, i, 0)
container.addItem(child.viewbox, i, 1)
if x_axis is not None:
container.addItem(x_axis, i+1, 1)
# Link the child viewboxes together
for child in children[1:]:
child.viewbox.setXLink(children[0].viewbox)
#children[0].viewbox.setXRange(0, 100, padding=0)
return container
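# Illustrative layout sketch (not part of the original module): for two
# children, the GraphicsLayout grid built by create_container ends up as
#
#     row 0:  child0.y_axis | child0.viewbox
#     row 1:  child1.y_axis | child1.viewbox
#     row 2:                | shared x_axis   (only if x_axis is not None)
#
# and every viewbox after the first is X-linked to the first one.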
################################################################################
# Pattern containers
################################################################################
class MultiPlotContainer(Looper, PSIContribution):
group = d_(Unicode())
containers = d_(Dict())
_workbench = Value()
selected_item = Value()
def refresh_items(self):
super().refresh_items()
if not self.iterable:
return
self.containers = {str(i): c[0].container for \
i, c in zip(self.iterable, self.items)}
load_manifests(self.items, self._workbench)
for item in self.items:
load_manifests(item, self._workbench)
load_manifests(item[0].children, self._workbench)
deferred_call(item[0].format_container)
################################################################################
# Containers (defines a shared set of containers across axes)
################################################################################
class BasePlotContainer(PSIContribution):
label = d_(Unicode())
container = Typed(pg.GraphicsWidget)
x_axis = Typed(pg.AxisItem)
base_viewbox = Property()
legend = Typed(pg.LegendItem)
def _default_container(self):
return create_container(self.children, self.x_axis)
def _default_legend(self):
legend = pg.LegendItem()
legend.setParentItem(self.container)
return legend
def _get_base_viewbox(self):
return self.children[0].viewbox
def _default_x_axis(self):
x_axis = pg.AxisItem('bottom')
x_axis.setGrid(64)
x_axis.linkToView(self.children[0].viewbox)
return x_axis
def update(self, event=None):
pass
def find(self, name):
for child in self.children:
if child.name == name:
return child
def format_container(self):
pass
def _reset_plots(self):
pass
class PlotContainer(BasePlotContainer):
x_min = d_(Float(0))
x_max = d_(Float(0))
def format_container(self):
# If we want to specify values relative to a psi context variable, we
# cannot do it when initializing the plots.
if (self.x_min != 0) or (self.x_max != 0):
self.base_viewbox.setXRange(self.x_min, self.x_max, padding=0)
def update(self, event=None):
deferred_call(self.format_container)
class BaseTimeContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same time-based X-axis
'''
data_range = Typed(BaseDataRange)
span = d_(Float(1))
delay = d_(Float(0.25))
def _default_container(self):
container = super()._default_container()
# Ensure that the x axis shows the planned range
self.base_viewbox.setXRange(0, self.span, padding=0)
self.data_range.observe('current_range', self.update)
return container
def _default_x_axis(self):
x_axis = super()._default_x_axis()
x_axis.setLabel('Time', unitPrefix='sec.')
return x_axis
def update(self, event=None):
low, high = self.data_range.current_range
deferred_call(self.base_viewbox.setXRange, low, high, padding=0)
super().update()
class TimeContainer(BaseTimeContainer):
def _default_data_range(self):
return ChannelDataRange(container=self, span=self.span,
delay=self.delay)
def update(self, event=None):
for child in self.children:
child.update()
super().update()
class EpochTimeContainer(BaseTimeContainer):
def _default_data_range(self):
return EpochDataRange(container=self, span=self.span, delay=self.delay)
def format_log_ticks(values, scale, spacing):
values = 10**np.array(values).astype(float)
return ['{:.1f}'.format(v) for v in values]
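# Illustrative sketch: pyqtgraph passes tick values in log10 units when an axis
# is in log mode, and format_log_ticks converts them back to linear labels.
#
#     >>> format_log_ticks([1, 2, 3], scale=None, spacing=None)
#     ['10.0', '100.0', '1000.0']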
class FFTContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same frequency-based X-axis
'''
freq_lb = d_(Float(5))
freq_ub = d_(Float(50000))
def _default_container(self):
container = super()._default_container()
self.base_viewbox.setXRange(np.log10(self.freq_lb),
np.log10(self.freq_ub),
padding=0)
return container
def _default_x_axis(self):
x_axis = super()._default_x_axis()
x_axis.setLabel('Frequency (Hz)')
x_axis.logTickStrings = format_log_ticks
x_axis.setLogMode(True)
return x_axis
################################################################################
# ViewBox
################################################################################
class ViewBox(PSIContribution):
viewbox = Typed(pg.ViewBox)
y_axis = Typed(pg.AxisItem)
y_mode = d_(Enum('symmetric', 'upper'))
y_min = d_(Float(0))
y_max = d_(Float(0))
allow_zoom_y = d_(Bool(True))
allow_zoom_x = d_(Bool(False))
data_range = Property()
def _default_name(self):
return self.label
def _get_data_range(self):
return self.parent.data_range
def _default_y_axis(self):
y_axis = pg.AxisItem('left')
y_axis.setLabel(self.label)
y_axis.linkToView(self.viewbox)
y_axis.setGrid(64)
return y_axis
def _default_viewbox(self):
viewbox = pg.ViewBox(enableMenu=False)
viewbox.setMouseEnabled(x=False, y=True)
viewbox.setBackgroundColor('w')
if (self.y_min != 0) or (self.y_max != 0):
viewbox.disableAutoRange()
viewbox.setYRange(self.y_min, self.y_max)
for child in self.children:
for plot in child.get_plots():
viewbox.addItem(plot)
return viewbox
def update(self, event=None):
for child in self.children:
child.update()
def add_plot(self, plot, label=None):
self.viewbox.addItem(plot)
if label:
self.parent.legend.addItem(plot, label)
def plot(self, x, y, color='k', log_x=False, log_y=False, label=None,
kind='line'):
'''
Convenience function used by plugins
This is typically used in post-processing routines to add static plots
to existing view boxes.
'''
if log_x:
x = np.log10(x)
if log_y:
y = np.log10(y)
x = np.asarray(x)
y = np.asarray(y)
m = np.isfinite(x) & np.isfinite(y)
x = x[m]
y = y[m]
if kind == 'line':
item = pg.PlotCurveItem(pen=pg.mkPen(color))
elif kind == 'scatter':
item = pg.ScatterPlotItem(pen=pg.mkPen(color))
item.setData(x, y)
self.add_plot(item)
if label is not None:
self.parent.legend.addItem(item, label)
################################################################################
# Plots
################################################################################
class BasePlot(PSIContribution):
# Make this weak-referenceable so we can bind methods to Qt slots.
__slots__ = '__weakref__'
source_name = d_(Unicode())
source = Typed(object)
label = d_(Unicode())
def update(self, event=None):
pass
def _reset_plots(self):
pass
################################################################################
# Single plots
################################################################################
class SinglePlot(BasePlot):
pen_color = d_(Typed(object))
pen_width = d_(Float(0))
antialias = d_(Bool(False))
label = d_(Unicode())
pen = Typed(object)
plot = Typed(object)
def get_plots(self):
return [self.plot]
def _default_pen_color(self):
return 'black'
def _default_pen(self):
color = make_color(self.pen_color)
return pg.mkPen(color, width=self.pen_width)
def _default_name(self):
return self.source_name + '_plot'
class ChannelPlot(SinglePlot):
downsample = Int(0)
decimate_mode = d_(Enum('extremes', 'mean'))
_cached_time = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_channel_plot'
def _default_plot(self):
return pg.PlotCurveItem(pen=self.pen, antialias=self.antialias)
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_source(self.source)
self.parent.data_range.observe('span', self._update_time)
self.source.add_callback(self._append_data)
self.parent.viewbox.sigResized.connect(self._update_decimation)
self._update_time(None)
self._update_decimation(self.parent.viewbox)
def _update_time(self, event):
# Precompute the time array since this can sometimes be the slow step in
# the update computations.
n = round(self.parent.data_range.span*self.source.fs)
self._cached_time = np.arange(n)/self.source.fs
self._update_decimation()
self._update_buffer()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs,
self.parent.data_range.span*2)
def _update_decimation(self, viewbox=None):
try:
width, _ = self.parent.viewbox.viewPixelSize()
dt = self.source.fs**-1
self.downsample = round(width/dt/2)
except Exception as e:
pass
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def update(self, event=None):
low, high = self.parent.data_range.current_range
data = self._buffer.get_range_filled(low, high, np.nan)
t = self._cached_time[:len(data)] + low
if self.downsample > 1:
t = t[::self.downsample]
if self.decimate_mode == 'extremes':
d_min, d_max = decimate_extremes(data, self.downsample)
t = t[:len(d_min)]
x = np.c_[t, t].ravel()
y = np.c_[d_min, d_max].ravel()
if x.shape == y.shape:
deferred_call(self.plot.setData, x, y, connect='pairs')
elif self.decimate_mode == 'mean':
d = decimate_mean(data, self.downsample)
t = t[:len(d)]
if t.shape == d.shape:
deferred_call(self.plot.setData, t, d)
else:
t = t[:len(data)]
deferred_call(self.plot.setData, t, data)
def _reshape_for_decimate(data, downsample):
# Determine the "fragment" size that we are unable to decimate. A
# downsampling factor of 5 means that we perform the operation in chunks of
# 5 samples. If we have only 13 samples of data, then we cannot decimate
# the last 3 samples and will simply discard them.
last_dim = data.ndim
offset = data.shape[-1] % downsample
if offset > 0:
data = data[..., :-offset]
shape = (len(data), -1, downsample) if data.ndim == 2 else (-1, downsample)
return data.reshape(shape)
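# Worked example (illustrative): with a 1-D buffer of 13 samples and
# downsample=5, the trailing 3 samples cannot fill a chunk and are dropped,
# leaving a (2, 5) array; a 2-D buffer of shape (4, 13) becomes (4, 2, 5).
# Each length-5 chunk is then reduced by decimate_mean / decimate_extremes.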
def decimate_mean(data, downsample):
# If data is empty, return immediately
if data.size == 0:
return np.array([])
data = _reshape_for_decimate(data, downsample).copy()
return data.mean(axis=-1)
def decimate_extremes(data, downsample):
# If data is empty, return immediately
if data.size == 0:
return np.array([]), np.array([])
# Force a copy to be made, which speeds up min()/max(). Apparently min/max
# make a copy of a reshaped array before performing the operation, so we
# force it now so the copy only occurs once.
data = _reshape_for_decimate(data, downsample).copy()
return data.min(axis=-1), data.max(axis=-1)
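# Illustrative sketch of the min/max envelope used by ChannelPlot above:
#
#     >>> data = np.array([0., 9., 1., 8., 2., 7., 3., 6., 4., 5.])
#     >>> decimate_extremes(data, 5)   # -> (array([0., 3.]), array([9., 7.]))
#
# ChannelPlot interleaves the two arrays and draws them with connect='pairs',
# so each decimated column shows up as a vertical min-to-max segment.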
class FFTChannelPlot(ChannelPlot):
time_span = d_(Float(1))
window = d_(Enum('hamming', 'flattop'))
_x = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_fft_plot'
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._append_data)
self.source.observe('fs', self._cache_x)
self._update_buffer()
self._cache_x()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs, self.time_span)
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def _cache_x(self, event=None):
if self.source.fs:
self._x = get_x_fft(self.source.fs, self.time_span)
def update(self, event=None):
if self._buffer.get_time_ub() >= self.time_span:
data = self._buffer.get_latest(-self.time_span, 0)
#psd = util.patodb(util.psd(data, self.source.fs, self.window))
psd = util.psd(data, self.source.fs, self.window)
spl = self.source.calibration.get_spl(self._x, psd)
deferred_call(self.plot.setData, self._x, spl)
class BaseTimeseriesPlot(SinglePlot):
rect_center = d_(Float(0.5))
rect_height = d_(Float(1))
fill_color = d_(Typed(object))
brush = Typed(object)
_rising = Typed(list, ())
_falling = Typed(list, ())
def _default_brush(self):
return pg.mkBrush(self.fill_color)
def _default_plot(self):
plot = pg.QtGui.QGraphicsPathItem()
plot.setPen(self.pen)
plot.setBrush(self.brush)
return plot
def update(self, event=None):
lb, ub = self.parent.data_range.current_range
current_time = self.parent.data_range.current_time
starts = self._rising
ends = self._falling
if len(starts) == 0 and len(ends) == 1:
starts = [0]
elif len(starts) == 1 and len(ends) == 0:
ends = [current_time]
elif len(starts) > 0 and len(ends) > 0:
if starts[0] > ends[0]:
starts = np.r_[0, starts]
if starts[-1] > ends[-1]:
ends = np.r_[ends, current_time]
try:
epochs = np.c_[starts, ends]
except ValueError as e:
log.exception(e)
log.warning('Unable to update %r, starts shape %r, ends shape %r',
self, starts, ends)
return
m = ((epochs >= lb) & (epochs < ub)) | np.isnan(epochs)
epochs = epochs[m.any(axis=-1)]
path = pg.QtGui.QPainterPath()
y_start = self.rect_center - self.rect_height*0.5
for x_start, x_end in epochs:
x_width = x_end-x_start
r = pg.QtCore.QRectF(x_start, y_start, x_width, self.rect_height)
path.addRect(r)
deferred_call(self.plot.setPath, path)
class EventPlot(BaseTimeseriesPlot):
event = d_(Unicode())
def _observe_event(self, event):
if self.event is not None:
self.parent.data_range.observe('current_time', self.update)
def _default_name(self):
return self.event + '_timeseries'
def _append_data(self, bound, timestamp):
if bound == 'start':
self._rising.append(timestamp)
elif bound == 'end':
self._falling.append(timestamp)
self.update()
class TimeseriesPlot(BaseTimeseriesPlot):
source_name = d_(Unicode())
source = Typed(object)
def _default_name(self):
return self.source_name + '_timeseries'
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_event_source(self.source)
self.parent.data_range.observe('current_time', self.update)
self.source.add_callback(self._append_data)
def _append_data(self, data):
for (etype, value) in data:
if etype == 'rising':
self._rising.append(value)
elif etype == 'falling':
self._falling.append(value)
################################################################################
# Group plots
################################################################################
class GroupMixin(ColorCycleMixin):
source = Typed(object)
group_meta = d_(Unicode())
groups = d_(Typed(ContextMeta))
group_names = d_(List())
#: Function that takes the epoch metadata and decides whether to accept it
#: for plotting. Useful to reduce the number of plots shown on a graph.
group_filter = d_(Callable())
#: Function that takes the epoch metadata and returns a key indicating
#: which group it should be included in for plotting.
group_color_key = d_(Callable())
pen_width = d_(Int(0))
antialias = d_(Bool(False))
plots = Dict()
_data_cache = Typed(object)
_data_count = Typed(object)
_data_updated = Typed(object)
_data_n_samples = Typed(object)
_pen_color_cycle = Typed(object)
_plot_colors = Typed(object)
_x = Typed(np.ndarray)
n_update = d_(Int(1))
def _default_group_names(self):
return [p.name for p in self.groups.values]
def _default_group_filter(self):
return lambda key: True
def _default_group_color_key(self):
return lambda key: tuple(key[g] for g in self.group_names)
def get_pen_color(self, key):
kw_key = {n: k for n, k in zip(self.group_names, key)}
group_key = self.group_color_key(kw_key)
return super().get_pen_color(group_key)
def reset_plots(self):
# Clear any existing plots and reset color cycle
for plot in self.plots.values():
self.parent.viewbox.removeItem(plot)
self.plots = {}
self._data_cache = defaultdict(list)
self._data_count = defaultdict(int)
self._data_updated = defaultdict(int)
self._data_n_samples = defaultdict(int)
def _observe_groups(self, event):
self.groups.observe('values', self._update_groups)
self._update_groups()
def _update_groups(self, event=None):
self.reset_plots()
self.group_names = [p.name for p in self.groups.values]
if self.source is not None:
self.update()
def get_plots(self):
return []
def _make_new_plot(self, key):
log.info('Adding plot for key %r', key)
try:
pen_color = self.get_pen_color(key)
pen = pg.mkPen(pen_color, width=self.pen_width)
plot = pg.PlotCurveItem(pen=pen, antialias=self.antialias)
deferred_call(self.parent.viewbox.addItem, plot)
self.plots[key] = plot
except KeyError as key_error:
key = key_error.args[0]
m = f'Cannot update plot since a field, {key}, ' \
'required by the plot is missing.'
raise ConfigurationException(m) from key_error
def get_plot(self, key):
if key not in self.plots:
self._make_new_plot(key)
return self.plots[key]
class EpochGroupMixin(GroupMixin):
duration = Float()
def _y(self, epoch):
return np.mean(epoch, axis=0) if len(epoch) \
else np.full_like(self._x, np.nan)
def _update_duration(self, event=None):
self.duration = self.source.duration
def _epochs_acquired(self, epochs):
for d in epochs:
md = d['info']['metadata']
if self.group_filter(md):
signal = d['signal']
key = tuple(md[n] for n in self.group_names)
self._data_cache[key].append(signal)
self._data_count[key] += 1
# Track number of samples
n = max(self._data_n_samples[key], len(signal))
self._data_n_samples[key] = n
# Does at least one epoch need to be updated?
for key, count in self._data_count.items():
if count >= self._data_updated[key] + self.n_update:
n = max(self._data_n_samples.values())
self.duration = n / self.source.fs
self.update()
break
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._epochs_acquired)
self.source.observe('duration', self._update_duration)
self.source.observe('fs', self._cache_x)
self.observe('duration', self._cache_x)
self._reset_plots()
self._cache_x()
def update(self, event=None):
# Update epochs that need updating
todo = []
for key, count in list(self._data_count.items()):
if count >= self._data_updated[key] + self.n_update:
data = self._data_cache[key]
plot = self.get_plot(key)
y = self._y(data)
todo.append((plot.setData, self._x, y))
self._data_updated[key] = len(data)
def update():
for setter, x, y in todo:
setter(x, y)
deferred_call(update)
class GroupedEpochAveragePlot(EpochGroupMixin, BasePlot):
def _cache_x(self, event=None):
# Set up the new time axis
if self.source.fs and self.duration:
n_time = round(self.source.fs * self.duration)
self._x = np.arange(n_time)/self.source.fs
def _default_name(self):
return self.source_name + '_grouped_epoch_average_plot'
def _observe_source(self, event):
super()._observe_source(event)
if self.source is not None:
self.parent.data_range.add_source(self.source)
class GroupedEpochFFTPlot(EpochGroupMixin, BasePlot):
def _default_name(self):
return self.source_name + '_grouped_epoch_fft_plot'
def _cache_x(self, event=None):
# Cache the frequency points. Must be in units of log for PyQtGraph.
# TODO: This could be a utility function stored in the parent?
if self.source.fs and self.duration:
self._x = get_x_fft(self.source.fs, self.duration)
def _y(self, epoch):
y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
return self.source.calibration.get_spl(self._x, util.psd(y, self.source.fs))
class GroupedEpochPhasePlot(EpochGroupMixin, BasePlot):
unwrap = d_(Bool(True))
def _default_name(self):
return self.source_name + '_grouped_epoch_phase_plot'
def _cache_x(self, event=None):
# Cache the frequency points. Must be in units of log for PyQtGraph.
# TODO: This could be a utility function stored in the parent?
if self.source.fs and self.duration:
self._x = get_x_fft(self.source.fs, self.duration)
def _y(self, epoch):
y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
return util.phase(y, self.source.fs, unwrap=self.unwrap)
class StackedEpochAveragePlot(EpochGroupMixin, BasePlot):
_offset_update_needed = Bool(False)
def _make_new_plot(self, key):
super()._make_new_plot(key)
self._offset_update_needed = True
def _update_offsets(self, vb=None):
vb = self.parent.viewbox
height = vb.height()
n = len(self.plots)
for i, (_, plot) in enumerate(sorted(self.plots.items())):
offset = (i+1) * height / (n+1)
point = vb.mapToView(pg.Point(0, offset))
plot.setPos(0, point.y())
def _cache_x(self, event=None):
# Set up the new time axis
if self.source.fs and self.source.duration:
n_time = round(self.source.fs * self.source.duration)
self._x = np.arange(n_time)/self.source.fs
def update(self):
super().update()
if self._offset_update_needed:
deferred_call(self._update_offsets)
self._offset_update_needed = False
def _reset_plots(self):
#super()._reset_plots()
self.parent.viewbox \
.sigRangeChanged.connect(self._update_offsets)
self.parent.viewbox \
.sigRangeChangedManually.connect(self._update_offsets)
################################################################################
# Simple plotters
################################################################################
class ResultPlot(SinglePlot):
x_column = d_(Unicode())
y_column = d_(Unicode())
average = d_(Bool())
SYMBOL_MAP = {
'circle': 'o',
'square': 's',
'triangle': 't',
'diamond': 'd',
}
symbol = d_(Enum('circle', 'square', 'triangle', 'diamond'))
symbol_size = d_(Float(10))
symbol_size_unit = d_(Enum('screen', 'data'))
data_filter = d_(Callable())
_data_cache = Typed(list)
def _default_data_filter(self):
# By default, accept all data points
return lambda x: True
def _default_name(self):
return '.'.join((self.parent.name, self.source_name, 'result_plot',
self.x_column, self.y_column))
def _observe_source(self, event):
if self.source is not None:
self._data_cache = []
self.source.add_callback(self._data_acquired)
def _data_acquired(self, data):
update = False
for d in data:
if self.data_filter(d):
x = d[self.x_column]
y = d[self.y_column]
self._data_cache.append((x, y))
update = True
if update:
self.update()
def update(self, event=None):
if not self._data_cache:
return
x, y = zip(*self._data_cache)
x = np.array(x)
y = np.array(y)
if self.average:
d = pd.DataFrame({'x': x, 'y': y}).groupby('x')['y'].mean()
x = d.index.values
y = d.values
deferred_call(self.plot.setData, x, y)
def _default_plot(self):
symbol_code = self.SYMBOL_MAP[self.symbol]
color = QColor(self.pen_color)
pen = pg.mkPen(color, width=self.pen_width)
brush = pg.mkBrush(color)
plot = pg.PlotDataItem(pen=pen,
antialias=self.antialias,
symbol=symbol_code,
symbolSize=self.symbol_size,
symbolPen=pen,
symbolBrush=brush,
pxMode=self.symbol_size_unit=='screen')
deferred_call(self.parent.add_plot, plot, self.label)
return plot
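# Illustrative sketch (not part of the original module) of the `average`
# option above: repeated x values are collapsed to their mean y before
# plotting.
#
#     >>> cache = [(1, 2.0), (1, 4.0), (2, 6.0)]
#     >>> pd.DataFrame(cache, columns=['x', 'y']).groupby('x')['y'].mean()
#     # -> y=3.0 at x=1 and y=6.0 at x=2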
class DataFramePlot(ColorCycleMixin, PSIContribution):
data = d_(Typed(pd.DataFrame))
x_column = d_(Unicode())
y_column = d_(Unicode())
grouping = d_(List(Unicode()))
_plot_cache = Dict()
SYMBOL_MAP = {
'circle': 'o',
'square': 's',
'triangle': 't',
'diamond': 'd',
}
symbol = d_(Enum('circle', 'square', 'triangle', 'diamond'))
symbol_size = d_(Float(10))
symbol_size_unit = d_(Enum('screen', 'data'))
pen_width = d_(Float(0))
antialias = d_(Bool(False))
def _default_name(self):
return '.'.join((self.parent.name, 'result_plot'))
def _observe_x_column(self, event):
self.reset_plots()
self._observe_data(event)
def _observe_y_column(self, event):
self.reset_plots()
self._observe_data(event)
def _observe_grouping(self, event):
self.reset_plots()
self._observe_data(event)
def _observe_data(self, event):
if self.data is None:
return
if self.x_column not in self.data:
return
if self.y_column not in self.data:
return
todo = []
if self.grouping:
try:
for group, values in self.data.groupby(self.grouping):
if group not in self._plot_cache:
self._plot_cache[group] = self._default_plot(group)
x = values[self.x_column].values
y = values[self.y_column].values
i = np.argsort(x)
todo.append((self._plot_cache[group], x[i], y[i]))
except KeyError as e:
# This is likely triggered when grouping updates an analysis
# before it's ready.
log.warning(e)
return
else:
if None not in self._plot_cache:
self._plot_cache[None] = self._default_plot(None)
x = self.data[self.x_column].values
y = self.data[self.y_column].values
i = np.argsort(x)
todo.append((self._plot_cache[None], x[i], y[i]))
def update():
nonlocal todo
for plot, x, y in todo:
plot.setData(x, y)
deferred_call(update)
def _default_plot(self, group):
symbol_code = self.SYMBOL_MAP[self.symbol]
color = self.get_pen_color(group)
brush = pg.mkBrush(color)
pen = pg.mkPen(color, width=self.pen_width)
plot = pg.PlotDataItem(pen=pen,
antialias=self.antialias,
symbol=symbol_code,
symbolSize=self.symbol_size,
symbolPen=pen,
symbolBrush=brush,
pxMode=self.symbol_size_unit=='screen')
deferred_call(self.parent.add_plot, plot, self.label)
return plot
def reset_plots(self):
for plot in self._plot_cache.values():
deferred_call(self.parent.viewbox.removeItem, plot)
self._plot_cache = {}
def get_plots(self):
return list(self._plot_cache.values())
|
|
'''
Copyleft Mar 26, 2017 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: [email protected]
'''
import numpy as np
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd
pd.options.display.max_rows = 20
pd.options.display.expand_frame_repr = False
import seaborn as sns
import pylab as plt
import matplotlib as mpl
import os
import Utils.Util as utl
import Utils.Plots as pplt
import Scripts.KyrgysHAPH.Util as kutl
import Scripts.KyrgysHAPH.Plot as kplt
from matplotlib.backends.backend_pdf import PdfPages
reload(utl)
path='/media/arya/d4565cf2-d44a-4b67-bf97-226a486c01681/Data/Human/Kyrgyz/scan/'
selscanPath='/media/arya/d4565cf2-d44a-4b67-bf97-226a486c01681/Data/Human/Kyrgyz/scan/selscan/'
def to_hg38(b):
b=b.reset_index()
b.CHROM=b.CHROM.apply(lambda x: 'chr{}'.format(x))
chroms=map(lambda x: 'chr{}'.format(x),range(1,23))
c=utl.BED.xmap_bed(b,19,38).dropna()
c=pd.concat([c[38],c[19].score],1).loc[chroms].reset_index()
c.CHROM=c.CHROM.apply(lambda x: int(x[3:]))
c[['start','end']]=c[['start','end']].applymap(int)
return utl.BED.getIntervals( c,50000).dropna()
def to_hg38_all():
a=pd.read_pickle(selscanPath+'normalized.filtered.df').reset_index()
a.CHROM=a.CHROM.apply(lambda x: 'chr{}'.format(x))
a['start']=a.POS
a['end']=a.POS
chroms=map(lambda x: 'chr{}'.format(x),range(1,23))
c=utl.BED.xmap_bed(a[['CHROM','start','end']],19,38).dropna()
c=c.groupby(level=0,axis=1).apply(lambda x: x[x.name].start).applymap(int).reset_index().rename(columns={19:'POS'})
c.CHROM=c.CHROM.apply(lambda x: int(x[3:]))
c=c.set_index(['CHROM','POS'])[38]
aout=pd.read_pickle(selscanPath+'normalized.filtered.df').join(c)
aout=aout[~aout[38].isnull()]
aout[38]=aout[38].astype(int)
aout=aout.reset_index().set_index(['CHROM',38])
aout=aout[['ihs','nsl']]
aout.index.names=['CHROM','POS']
aout.to_pickle(selscanPath+'normalized.filtered.mapped.df')
def normalize():
f=utl.normalizeIHS
a=pd.concat([f(pd.read_pickle(selscanPath+'ihs.df')),f(pd.read_pickle(selscanPath+'nsl.df'))],1).apply(utl.pval.zpval)
a.columns=['ihs','nsl']
a.to_pickle(selscanPath+'normalized.df')
def saveIntervals():
try:
a=pd.read_pickle(selscanPath+'normalized.filtered.mapped.df')
except:
# pd.read_pickle(selscanPath+'normalized.df').apply(lambda x: utl.filterGap(x.dropna(),assempbly=19)).to_pickle(selscanPath+'normalized.filtered.df')
a=pd.read_pickle(selscanPath+'normalized.filtered.mapped.df')
getReg=lambda method: (utl.BED.getIntervals(a[method].dropna().sort_values().iloc[-200:],padding=50000)).reset_index()
methods=['ihs','nsl']
pd.concat(map(getReg,methods),keys=methods).to_pickle(path+'ihs-nsl.regions.df')
# pd.concat([pd.read_pickle(path+'SFS.regions.df'),pd.read_pickle(path+'ihs-nsl.regions.df')]).to_pickle(path+'intervals.df')
def saveihsnsl():
a=pd.read_pickle(selscanPath+'normalized.filtered.df').apply(utl.scanGenome)
a.to_pickle(path+'ihs-nsl.df')
pplt.Manhattan(a,top_k=200)
def saveXP():
cmh=pd.read_pickle('/media/arya/d4565cf2-d44a-4b67-bf97-226a486c01681/Data/Human/Kyrgyz/scan/CMH.df')['control-case'].rename('cmh').sort_index()
a=utl.scanGenome(cmh.loc[range(1,23)])
x=utl.loadFst('/home/arya/storage/Data/Human/Kyrgyz/scan/Fst/HAPH_Healthy.weir.fst')
b=utl.scanGenome(x)
pplt.Manhattan(b,top_k=250)
c=kutl.Data.renamePops(pd.read_pickle(kutl.path+'data/hg38/freq.df')).loc[range(1,23)]
diff=pd.concat([(c['Healthy']-c['Sick']).abs().replace({0:None}).dropna().rename('diff-SH'),(c['HAPH']-c['No-HAPH']).abs().replace({0:None}).dropna().rename('diff-HN')],1)
diff.to_pickle('/media/arya/d4565cf2-d44a-4b67-bf97-226a486c01681/Data/Human/Kyrgyz/scan/diff.df')
diff=pd.read_pickle('/media/arya/d4565cf2-d44a-4b67-bf97-226a486c01681/Data/Human/Kyrgyz/scan/diff.df')
c=diff.apply(utl.scanGenome)
c.groupby(level=0).size().shape
pplt.Manhattan(c,top_k=200)
d=pd.concat([a,b,c],1)
pplt.Manhattan(d,top_k=200)
d.to_pickle('/media/arya/d4565cf2-d44a-4b67-bf97-226a486c01681/Data/Human/Kyrgyz/scan/xp.reg.df')
xp=pd.concat([diff,x,cmh],1)
xp.loc[range(1,23)].to_pickle('/media/arya/d4565cf2-d44a-4b67-bf97-226a486c01681/Data/Human/Kyrgyz/scan/xp.df')
# saveIntervals()
# a=utl.BED.getIntervals(a.iloc[:2000],50000).sort_values('score').iloc[-200:]
def plot():
f=lambda x: x[x>x.quantile(0.95)]
a=pd.concat([pd.read_pickle(kutl.path+'scan/SFS.df').unstack(level=0),pd.read_pickle(selscanPath+'normalized.filtered.mapped.df')],1).apply(f)
fig,axes=plt.subplots(5, 1, sharex=True,figsize=(20, 20),dpi=100)
pplt.Manhattan(a,top_k=200,fname=kutl.path+'plots/scan.png',fig=fig)
e=pd.read_pickle(kutl.path+'scan/xp.reg.df').apply(f)
fig,axes=plt.subplots(4, 1, sharex=True,figsize=(20, 15),dpi=100)
pplt.Manhattan(e,top_k=200,fig=fig,fname=kutl.path+'plots/scan.xp.png')
def Fst():
fname='/home/arya/storage/Data/Human/Kyrgyz/scan/Fst/HAPH_Healthy.weir.fst'
fst=utl.scanGenome(utl.loadFst(fname))
fst
O=fst[fst>fst.quantile(0.995)]
pplt.Manhattan(fst,Outliers=O.reset_index())
shade=utl.BED.getIntervals(O,30000).reset_index()
shade.CHROM=shade.CHROM.astype(int)
# pd.concat([pd.concat([shade],keys=['fst']),pd.read_pickle(path+'intervals.df')]).to_pickle(path+'intervals.df')
def getRegions(xpstat,regions,ihs,freq):
x=pd.read_pickle(kutl.path+'scan/xp.df')[xpstat].dropna()
shade=utl.BED.getIntervals(x.sort_values().iloc[-500:],50000).sort_values('score',ascending=False).reset_index()
shade.CHROM=shade.CHROM.astype(int)
pad=10000
regions.sort_index(inplace=True)
for i,row in regions.iterrows():
print i
tmp=get(x,row)
regions.loc[i,'m'],regions.loc[i,'mu'],regions.loc[i,'mumed'],regions.loc[i,'max']=tmp.size,tmp.mean(), tmp[tmp>=tmp.median()].mean(),tmp.max()
tmpihs=get(ihs,row)
# print tmpihs
# tmpx=freq.loc[row.CHROM].loc[tmpihs.idxmax()]
# regions.loc[i,'ihsx'] = tmpx['No-HAPH']-tmpx['HAPH']
regions.sort_values('mu',ascending=False,inplace=True)
def get(x,row,pad=0):
tmp=x.loc[row.CHROM]
return tmp[(tmp.index+pad>=row.start)&(tmp.index-pad<=row.end)]
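# Illustrative sketch (hypothetical values): `get` slices a genome-wide Series,
# indexed by (CHROM, POS), down to the positions covered by one interval row.
# With row.CHROM=2, row.start=100000, row.end=150000 and pad=10000 it keeps the
# chromosome-2 positions falling in [90000, 160000].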
def tomorrow(field):
regions=pd.concat([pd.read_pickle(kutl.path+'scan/ihs-nsl.regions.df'),pd.read_pickle(kutl.path+'scan/SFS.regions.df')])
regions.index.names=['method','i'];
regions=regions.reset_index()
ihs=pd.read_pickle(selscanPath+'normalized.filtered.mapped.df').ihs.dropna()
f=lambda x: x[x>x.quantile(0.95)]
# a=pd.concat([pd.read_pickle(kutl.path+'scan/selscan/normalized.filtered.mapped.df'),pd.read_pickle(kutl.path+'scan/SFS.df').unstack(0)]).apply(f);a['ihs']=a['ihs'][a['ihs']>5];a['nsl']=a['nsl'][a['nsl']>5];a.to_pickle(kutl.path + 'scan/scan.top5percent.df');
a=pd.read_pickle(kutl.path + 'scan/scan.top5percent.df')
#
# getRegions('diff-HN',regions,ihs,freq);regions.to_pickle(kutl.path+'scan/meeting.regions.df')
regions=pd.read_pickle(kutl.path+'scan/meeting.regions.df')
interval=regions.iloc[0]
plt.ion()
plt.ioff()
fields=['mu','max','mumed']
regions.isnull().sum()
# for field in fields:
regions.sort_values(field,ascending=False,inplace=True)
with PdfPages(kutl.path+'plots/{}.pdf'.format(field)) as pdf:
for i in range(50):
interval=regions.iloc[i]
try:
padd=3e5
intervalExpanded=interval.copy(True)
intervalExpanded.start-=padd
intervalExpanded.end+=padd
fig,axes=plt.subplots(5, 1, sharex=True,figsize=(20, 20),dpi=100)
pplt.Manhattan(a.loc[[interval.CHROM]],fig=fig,shade=pd.DataFrame(intervalExpanded).T)
plt.suptitle('Interval {} ({})'.format(i,interval.method))
pdf.savefig(plt.gcf())
ii=get(ihs,interval)
ii=ii[ii>5]
# ii=pd.concat([],keys=[interval.CHROM])
# ii.index.names=['CHROM','POS']
reload(kplt)
kplt.plotSFSold2(interval=interval, fold=not True, ii=ii)
pplt.annotate('Interval {} ({})'.format(i, interval.method), fontsize=12, ax=plt.gcf().axes[0])
pdf.savefig(plt.gcf())
plt.close('all')
except Exception:
# skip intervals that fail to plot; the loop continues with the next one
pass
print i,field
def saveTracks():
a=pd.read_pickle(kutl.path + 'scan/scan.top5percent.df')
a=a[['ihs','nsl']]
a=a[a.isnull().sum(1)<2]
utl.BED.saveBEDGraphDF(a,fout_path=kutl.path+'UCSC/ihs-nsl')
f=lambda x: x[x>x.quantile(0.95)]
a=pd.read_pickle(kutl.path + 'scan/SFS.df').unstack(0).apply(f)
utl.BED.saveBEDGraphDF(a,fout_path=kutl.path+'UCSC/SFS',winSize=100000)
def final():
freq=kutl.Data.renamePops(pd.read_pickle(kutl.path+'data/hg38/freq.df')).loc[range(1,23)]
I=(freq['No-HAPH']-freq['HAPH'])>0.2
a=utl.scanGenome(I,np.sum)
o=utl.BED.getIntervals(a.sort_values(ascending=False).iloc[:100],padding=30000).sort_values('score',ascending=False).reset_index()
pplt.Manhattan(a,top_k=100)
plt.ioff()
with PdfPages(kutl.path+'plots/{}.pdf'.format('a')) as pdf:
for i in range(o.shape[0]):
interval=o.iloc[i]
reload(kplt)
kplt.plotSFSold2(interval=interval, fold=not True)
pplt.annotate('Interval {} ({})'.format(i, ' '), fontsize=12, ax=plt.gcf().axes[0])
pdf.savefig(plt.gcf())
plt.close('all')
# tomorrow('max')
# tomorrow('mu')
# tomorrow('mumed')
a=pd.read_pickle('/media/arya/d4565cf2-d44a-4b67-bf97-226a486c01681/Data/Human/Kyrgyz/scan/win.500K.df').loc[range(1,23)]
a
reload(utl)
aa=utl.filterGap(a)
a.size
a=kutl.Data.renamePops(a)
b=a.xs('SFSelect',level='method')
b['NoHAPH/HAPH']=b['No-HAPH']-b['HAPH']
pplt.Manhattan(b[b>0.2].loc[[11]])
|
|
#!/usr/bin/env python
# Copyright 2016 Vijayaditya Peddinti
# Apache 2.0
from __future__ import print_function
import argparse
import sys
import os
import subprocess
import errno
import copy
import shutil
import warnings
def GetArgs():
# we add compulsory arguments as named arguments for readability
parser = argparse.ArgumentParser(description="""
**Warning, this script is deprecated. Please use utils/data/combine_short_segments.sh**
This script concatenates segments in the input_data_dir to ensure that"""
" the segments in the output_data_dir have a specified minimum length.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--minimum-duration", type=float, required = True,
help="Minimum duration of the segments in the output directory")
parser.add_argument("--input-data-dir", type=str, required = True)
parser.add_argument("--output-data-dir", type=str, required = True)
print(' '.join(sys.argv))
args = parser.parse_args()
return args
def RunKaldiCommand(command, wait = True):
""" Runs commands frequently seen in Kaldi scripts. These are usually a
sequence of commands connected by pipes, so we use shell=True """
p = subprocess.Popen(command, shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
if wait:
[stdout, stderr] = p.communicate()
if p.returncode != 0:
raise Exception("There was an error while running the command {0}\n".format(command)+"-"*10+"\n"+stderr)
return stdout, stderr
else:
return p
def MakeDir(dir):
try:
os.mkdir(dir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise exc
raise Exception("Directory {0} already exists".format(dir))
pass
def CheckFiles(input_data_dir):
for file_name in ['spk2utt', 'text', 'utt2spk', 'feats.scp']:
file_name = '{0}/{1}'.format(input_data_dir, file_name)
if not os.path.exists(file_name):
raise Exception("There is no such file {0}".format(file_name))
def ParseFileToDict(file, assert2fields = False, value_processor = None):
if value_processor is None:
value_processor = lambda x: x[0]
dict = {}
for line in open(file, 'r'):
parts = line.split()
if assert2fields:
assert(len(parts) == 2)
dict[parts[0]] = value_processor(parts[1:])
return dict
def WriteDictToFile(dict, file_name):
file = open(file_name, 'w')
keys = sorted(dict.keys())
for key in keys:
value = dict[key]
if type(value) in [list, tuple] :
if type(value) is tuple:
value = list(value)
value.sort()
value = ' '.join(value)
file.write('{0}\t{1}\n'.format(key, value))
file.close()
def ParseDataDirInfo(data_dir):
data_dir_file = lambda file_name: '{0}/{1}'.format(data_dir, file_name)
utt2spk = ParseFileToDict(data_dir_file('utt2spk'))
spk2utt = ParseFileToDict(data_dir_file('spk2utt'), value_processor = lambda x: x)
text = ParseFileToDict(data_dir_file('text'), value_processor = lambda x: " ".join(x))
# we want to assert feats.scp has just 2 fields, as we don't know how
# to process it otherwise
feat = ParseFileToDict(data_dir_file('feats.scp'), assert2fields = True)
utt2dur = ParseFileToDict(data_dir_file('utt2dur'), value_processor = lambda x: float(x[0]))
utt2uniq = None
if os.path.exists(data_dir_file('utt2uniq')):
utt2uniq = ParseFileToDict(data_dir_file('utt2uniq'))
return utt2spk, spk2utt, text, feat, utt2dur, utt2uniq
def GetCombinedUttIndexRange(utt_index, utts, utt_durs, minimum_duration):
# We want the minimum number of concatenations
# to reach the minimum_duration. If two concatenations satisfy
# the minimum duration constraint we choose the shorter one.
left_index = utt_index - 1
right_index = utt_index + 1
num_remaining_segments = len(utts) - 1
cur_utt_dur = utt_durs[utts[utt_index]]
while num_remaining_segments > 0:
left_utt_dur = 0
if left_index >= 0:
left_utt_dur = utt_durs[utts[left_index]]
right_utt_dur = 0
if right_index <= len(utts) - 1:
right_utt_dur = utt_durs[utts[right_index]]
right_combined_utt_dur = cur_utt_dur + right_utt_dur
left_combined_utt_dur = cur_utt_dur + left_utt_dur
left_right_combined_utt_dur = cur_utt_dur + left_utt_dur + right_utt_dur
combine_left_exit = False
combine_right_exit = False
if right_combined_utt_dur >= minimum_duration:
if left_combined_utt_dur >= minimum_duration:
if left_combined_utt_dur <= right_combined_utt_dur:
combine_left_exit = True
else:
combine_right_exit = True
else:
combine_right_exit = True
elif left_combined_utt_dur >= minimum_duration:
combine_left_exit = True
elif left_right_combined_utt_dur >= minimum_duration :
combine_left_exit = True
combine_right_exit = True
if combine_left_exit and combine_right_exit:
cur_utt_dur = left_right_combined_utt_dur
break
elif combine_left_exit:
cur_utt_dur = left_combined_utt_dur
# move back the right_index as we don't need to combine it
right_index = right_index - 1
break
elif combine_right_exit:
cur_utt_dur = right_combined_utt_dur
# move back the left_index as we don't need to combine it
left_index = left_index + 1
break
# couldn't satisfy minimum duration requirement so continue search
if left_index >= 0:
num_remaining_segments = num_remaining_segments - 1
if right_index <= len(utts) - 1:
num_remaining_segments = num_remaining_segments - 1
left_index = left_index - 1
right_index = right_index + 1
cur_utt_dur = left_right_combined_utt_dur
left_index = max(0, left_index)
right_index = min(len(utts)-1, right_index)
return left_index, right_index, cur_utt_dur
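# Worked example (illustrative): suppose a speaker has utterances
# utts = ['a', 'b', 'c'] with durations {a: 0.4, b: 0.3, c: 2.0} and
# minimum_duration = 1.0. For utt_index = 1 ('b'), combining to the right gives
# 0.3 + 2.0 = 2.3 >= 1.0 while combining to the left gives only 0.7, so the
# function returns (left_index=1, right_index=2, cur_utt_dur=2.3), i.e. 'b' is
# concatenated with 'c' only.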
def WriteCombinedDirFiles(output_dir, utt2spk, spk2utt, text, feat, utt2dur, utt2uniq):
out_dir_file = lambda file_name: '{0}/{1}'.format(output_dir, file_name)
total_combined_utt_list = []
for speaker in spk2utt.keys():
utts = spk2utt[speaker]
for utt in utts:
if type(utt) is tuple:
#this is a combined utt
total_combined_utt_list.append((speaker, utt))
for speaker, combined_utt_tuple in total_combined_utt_list:
combined_utt_list = list(combined_utt_tuple)
combined_utt_list.sort()
new_utt_name = "-".join(combined_utt_list)+'-appended'
# updating the utt2spk dict
for utt in combined_utt_list:
spk_name = utt2spk.pop(utt)
utt2spk[new_utt_name] = spk_name
# updating the spk2utt dict
spk2utt[speaker].remove(combined_utt_tuple)
spk2utt[speaker].append(new_utt_name)
# updating the text dict
combined_text = []
for utt in combined_utt_list:
combined_text.append(text.pop(utt))
text[new_utt_name] = ' '.join(combined_text)
# updating the feat dict
combined_feat = []
for utt in combined_utt_list:
combined_feat.append(feat.pop(utt))
feat_command = "concat-feats --print-args=false {feats} - |".format(feats = " ".join(combined_feat))
feat[new_utt_name] = feat_command
# updating utt2dur
combined_dur = 0
for utt in combined_utt_list:
combined_dur += utt2dur.pop(utt)
utt2dur[new_utt_name] = combined_dur
# updating utt2uniq
if utt2uniq is not None:
combined_uniqs = []
for utt in combined_utt_list:
combined_uniqs.append(utt2uniq.pop(utt))
# The utt2uniq file maps perturbed data to the original unperturbed
# versions so that the training and cross-validation sets can avoid
# overlapping data. However, if a perturbation changes the length of the
# utterance (e.g. speed perturbation), the utterance combinations in each
# perturbation of the original recording can be very different, so there
# is no good way to derive a utt2uniq mapping that still avoids overlap.
utt2uniq[new_utt_name] = combined_uniqs[0]
WriteDictToFile(utt2spk, out_dir_file('utt2spk'))
WriteDictToFile(spk2utt, out_dir_file('spk2utt'))
WriteDictToFile(feat, out_dir_file('feats.scp'))
WriteDictToFile(text, out_dir_file('text'))
if utt2uniq is not None:
WriteDictToFile(utt2uniq, out_dir_file('utt2uniq'))
WriteDictToFile(utt2dur, out_dir_file('utt2dur'))
def CombineSegments(input_dir, output_dir, minimum_duration):
utt2spk, spk2utt, text, feat, utt2dur, utt2uniq = ParseDataDirInfo(input_dir)
total_combined_utt_list = []
# copy the duration dictionary so that we can modify it
utt_durs = copy.deepcopy(utt2dur)
speakers = sorted(spk2utt.keys())
for speaker in speakers:
utts = spk2utt[speaker] # this is an assignment of the reference
# In WriteCombinedDirFiles the values of spk2utt will have the list
# of combined utts which will be used as reference
# We assume that the sorted utterance list corresponds to contiguous
# segments. This is true only if utterance naming follows the accepted
# conventions, so the assumption is easily violated; a more robust
# approach would be preferable.
utts.sort()
utt_index = 0
while utt_index < len(utts):
if utt_durs[utts[utt_index]] < minimum_duration:
left_index, right_index, cur_utt_dur = GetCombinedUttIndexRange(utt_index, utts, utt_durs, minimum_duration)
if not cur_utt_dur >= minimum_duration:
# this is a rare occurrence, better make the user aware of this
# situation and let them deal with it
warnings.warn('Speaker {0} does not have enough utterances to satisfy the minimum duration '
'constraint. Not modifying these utterances'.format(speaker))
utt_index = utt_index + 1
continue
combined_duration = 0
combined_utts = []
# update the utts_dur dictionary
for utt in utts[left_index:right_index + 1]:
combined_duration += utt_durs.pop(utt)
if type(utt) is tuple:
for item in utt:
combined_utts.append(item)
else:
combined_utts.append(utt)
combined_utts = tuple(combined_utts) # converting to immutable type to use as dictionary key
assert(cur_utt_dur == combined_duration)
# now modify the utts list
combined_indices = range(left_index, right_index + 1)
# start popping from the largest index so that the lower
# indexes are valid
for i in combined_indices[::-1]:
utts.pop(i)
utts.insert(left_index, combined_utts)
utt_durs[combined_utts] = combined_duration
utt_index = left_index
utt_index = utt_index + 1
WriteCombinedDirFiles(output_dir, utt2spk, spk2utt, text, feat, utt2dur, utt2uniq)
def Main():
print("""steps/cleanup/combine_short_segments.py: warning: this script is deprecated and will be removed.
Please use utils/data/combine_short_segments.sh""", file = sys.stderr)
args = GetArgs()
CheckFiles(args.input_data_dir)
MakeDir(args.output_data_dir)
feat_lengths = {}
segments_file = '{0}/segments'.format(args.input_data_dir)
RunKaldiCommand("utils/data/get_utt2dur.sh {0}".format(args.input_data_dir))
CombineSegments(args.input_data_dir, args.output_data_dir, args.minimum_duration)
RunKaldiCommand("utils/utt2spk_to_spk2utt.pl {od}/utt2spk > {od}/spk2utt".format(od = args.output_data_dir))
if os.path.exists('{0}/cmvn.scp'.format(args.input_data_dir)):
shutil.copy('{0}/cmvn.scp'.format(args.input_data_dir), args.output_data_dir)
RunKaldiCommand("utils/fix_data_dir.sh {0}".format(args.output_data_dir))
if __name__ == "__main__":
Main()
|
|
"""Symbolic primitives + unicode/ASCII abstraction for pretty.py"""
from __future__ import print_function, division
import sys
import warnings
unicode_warnings = ''
from sympy.core.compatibility import u, unicode, range
# first, set up the unicodedata environment
try:
import unicodedata
def U(name):
"""unicode character by name or None if not found"""
try:
u = unicodedata.lookup(name)
except KeyError:
u = None
global unicode_warnings
unicode_warnings += 'No \'%s\' in unicodedata\n' % name
return u
except ImportError:
unicode_warnings += 'No unicodedata available\n'
U = lambda name: None
from sympy.printing.conventions import split_super_sub
from sympy.core.alphabets import greeks
# prefix conventions when constructing tables
# L - LATIN i
# G - GREEK beta
# D - DIGIT 0
# S - SYMBOL +
__all__ = ['greek_unicode', 'sub', 'sup', 'xsym', 'vobj', 'hobj', 'pretty_symbol',
'annotated']
_use_unicode = False
def pretty_use_unicode(flag=None):
"""Set whether pretty-printer should use unicode by default"""
global _use_unicode
global unicode_warnings
if flag is None:
return _use_unicode
# we know that some letters are not supported in Python 2.X so
# ignore those warnings. Remove this when 2.X support is dropped.
if unicode_warnings:
known = ['LATIN SUBSCRIPT SMALL LETTER %s' % i for i in 'HKLMNPST']
unicode_warnings = '\n'.join([
l for l in unicode_warnings.splitlines() if not any(
i in l for i in known)])
# ------------ end of 2.X warning filtering
if flag and unicode_warnings:
# print warnings (if any) on first unicode usage
warnings.warn(unicode_warnings)
unicode_warnings = ''
use_unicode_prev = _use_unicode
_use_unicode = flag
return use_unicode_prev
def pretty_try_use_unicode():
"""See if unicode output is available and leverage it if possible"""
try:
symbols = []
# see, if we can represent greek alphabet
symbols.extend(greek_unicode.values())
# and atoms
symbols += atoms_table.values()
for s in symbols:
if s is None:
return # common symbols not present!
encoding = getattr(sys.stdout, 'encoding', None)
# this happens when e.g. stdout is redirected through a pipe, or is
# e.g. a cStringIO.StringO
if encoding is None:
return # sys.stdout has no encoding
# try to encode
s.encode(encoding)
except UnicodeEncodeError:
pass
else:
pretty_use_unicode(True)
def xstr(*args):
"""call str or unicode depending on current mode"""
if _use_unicode:
return unicode(*args)
else:
return str(*args)
# GREEK
g = lambda l: U('GREEK SMALL LETTER %s' % l.upper())
G = lambda l: U('GREEK CAPITAL LETTER %s' % l.upper())
greek_letters = list(greeks) # make a copy
# deal with Unicode's funny spelling of lambda
greek_letters[greek_letters.index('lambda')] = 'lamda'
# {} greek letter name -> unicode letter; lowercase names map to small
# letters and capitalized names (added below) to capital letters
greek_unicode = dict((L, g(L)) for L in greek_letters)
greek_unicode.update((L[0].upper() + L[1:], G(L)) for L in greek_letters)
# aliases
greek_unicode['lambda'] = greek_unicode['lamda']
greek_unicode['Lambda'] = greek_unicode['Lamda']
greek_unicode['varsigma'] = u'\N{GREEK SMALL LETTER FINAL SIGMA}'
digit_2txt = {
'0': 'ZERO',
'1': 'ONE',
'2': 'TWO',
'3': 'THREE',
'4': 'FOUR',
'5': 'FIVE',
'6': 'SIX',
'7': 'SEVEN',
'8': 'EIGHT',
'9': 'NINE',
}
symb_2txt = {
'+': 'PLUS SIGN',
'-': 'MINUS',
'=': 'EQUALS SIGN',
'(': 'LEFT PARENTHESIS',
')': 'RIGHT PARENTHESIS',
'[': 'LEFT SQUARE BRACKET',
']': 'RIGHT SQUARE BRACKET',
'{': 'LEFT CURLY BRACKET',
'}': 'RIGHT CURLY BRACKET',
# non-std
'{}': 'CURLY BRACKET',
'sum': 'SUMMATION',
'int': 'INTEGRAL',
}
# SUBSCRIPT & SUPERSCRIPT
LSUB = lambda letter: U('LATIN SUBSCRIPT SMALL LETTER %s' % letter.upper())
GSUB = lambda letter: U('GREEK SUBSCRIPT SMALL LETTER %s' % letter.upper())
DSUB = lambda digit: U('SUBSCRIPT %s' % digit_2txt[digit])
SSUB = lambda symb: U('SUBSCRIPT %s' % symb_2txt[symb])
LSUP = lambda letter: U('SUPERSCRIPT LATIN SMALL LETTER %s' % letter.upper())
DSUP = lambda digit: U('SUPERSCRIPT %s' % digit_2txt[digit])
SSUP = lambda symb: U('SUPERSCRIPT %s' % symb_2txt[symb])
sub = {} # symb -> subscript symbol
sup = {} # symb -> superscript symbol
# latin subscripts
for l in 'aeioruvxhklmnpst':
sub[l] = LSUB(l)
for l in 'in':
sup[l] = LSUP(l)
for gl in ['beta', 'gamma', 'rho', 'phi', 'chi']:
sub[gl] = GSUB(gl)
for d in [str(i) for i in range(10)]:
sub[d] = DSUB(d)
sup[d] = DSUP(d)
for s in '+-=()':
sub[s] = SSUB(s)
sup[s] = SSUP(s)
# Variable modifiers
# TODO: Is it worth trying to handle faces with, e.g., 'MATHEMATICAL BOLD CAPITAL A'?
# TODO: Make brackets adjust to height of contents
modifier_dict = {
# Accents
'mathring': lambda s: s+u'\N{COMBINING RING ABOVE}',
'ddddot': lambda s: s+u'\N{COMBINING DIAERESIS}\N{COMBINING DIAERESIS}',
'dddot': lambda s: s+u'\N{COMBINING DIAERESIS}\N{COMBINING DOT ABOVE}',
'ddot': lambda s: s+u'\N{COMBINING DIAERESIS}',
'dot': lambda s: s+u'\N{COMBINING DOT ABOVE}',
'check': lambda s: s+u'\N{COMBINING CARON}',
'breve': lambda s: s+u'\N{COMBINING BREVE}',
'acute': lambda s: s+u'\N{COMBINING ACUTE ACCENT}',
'grave': lambda s: s+u'\N{COMBINING GRAVE ACCENT}',
'tilde': lambda s: s+u'\N{COMBINING TILDE}',
'hat': lambda s: s+u'\N{COMBINING CIRCUMFLEX ACCENT}',
'bar': lambda s: s+u'\N{COMBINING OVERLINE}',
'vec': lambda s: s+u'\N{COMBINING RIGHT ARROW ABOVE}',
'prime': lambda s: s+u'\N{PRIME}',
'prm': lambda s: s+u'\N{PRIME}',
# # Faces -- these are here for some compatibility with latex printing
# 'bold': lambda s: s,
# 'bm': lambda s: s,
# 'cal': lambda s: s,
# 'scr': lambda s: s,
# 'frak': lambda s: s,
# Brackets
'norm': lambda s: u'\N{DOUBLE VERTICAL LINE}'+s+u'\N{DOUBLE VERTICAL LINE}',
'avg': lambda s: u'\N{MATHEMATICAL LEFT ANGLE BRACKET}'+s+u'\N{MATHEMATICAL RIGHT ANGLE BRACKET}',
'abs': lambda s: u'\N{VERTICAL LINE}'+s+u'\N{VERTICAL LINE}',
'mag': lambda s: u'\N{VERTICAL LINE}'+s+u'\N{VERTICAL LINE}',
}
# VERTICAL OBJECTS
HUP = lambda symb: U('%s UPPER HOOK' % symb_2txt[symb])
CUP = lambda symb: U('%s UPPER CORNER' % symb_2txt[symb])
MID = lambda symb: U('%s MIDDLE PIECE' % symb_2txt[symb])
EXT = lambda symb: U('%s EXTENSION' % symb_2txt[symb])
HLO = lambda symb: U('%s LOWER HOOK' % symb_2txt[symb])
CLO = lambda symb: U('%s LOWER CORNER' % symb_2txt[symb])
TOP = lambda symb: U('%s TOP' % symb_2txt[symb])
BOT = lambda symb: U('%s BOTTOM' % symb_2txt[symb])
# {} '(' -> ((extension, top, bottom, middle), 1-character form)
_xobj_unicode = {
# vertical symbols
# (( ext, top, bot, mid ), c1)
'(': (( EXT('('), HUP('('), HLO('(') ), '('),
')': (( EXT(')'), HUP(')'), HLO(')') ), ')'),
'[': (( EXT('['), CUP('['), CLO('[') ), '['),
']': (( EXT(']'), CUP(']'), CLO(']') ), ']'),
'{': (( EXT('{}'), HUP('{'), HLO('{'), MID('{') ), '{'),
'}': (( EXT('{}'), HUP('}'), HLO('}'), MID('}') ), '}'),
'|': U('BOX DRAWINGS LIGHT VERTICAL'),
'<': ((U('BOX DRAWINGS LIGHT VERTICAL'),
U('BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT'),
U('BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT')), '<'),
'>': ((U('BOX DRAWINGS LIGHT VERTICAL'),
U('BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT'),
U('BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT')), '>'),
'lfloor': (( EXT('['), EXT('['), CLO('[') ), U('LEFT FLOOR')),
'rfloor': (( EXT(']'), EXT(']'), CLO(']') ), U('RIGHT FLOOR')),
'lceil': (( EXT('['), CUP('['), EXT('[') ), U('LEFT CEILING')),
'rceil': (( EXT(']'), CUP(']'), EXT(']') ), U('RIGHT CEILING')),
'int': (( EXT('int'), U('TOP HALF INTEGRAL'), U('BOTTOM HALF INTEGRAL') ), U('INTEGRAL')),
'sum': (( U('BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT'), '_', U('OVERLINE'), U('BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT')), U('N-ARY SUMMATION')),
# horizontal objects
#'-': '-',
'-': U('BOX DRAWINGS LIGHT HORIZONTAL'),
'_': U('LOW LINE'),
# We used to use this, but LOW LINE looks better for roots, as it's a
# little lower (i.e., it lines up with the / perfectly). But perhaps this
# one would still be wanted for some cases?
# '_': U('HORIZONTAL SCAN LINE-9'),
# diagonal objects '\' & '/' ?
'/': U('BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT'),
'\\': U('BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT'),
}
_xobj_ascii = {
# vertical symbols
# (( ext, top, bot, mid ), c1)
'(': (( '|', '/', '\\' ), '('),
')': (( '|', '\\', '/' ), ')'),
# XXX this looks ugly
# '[': (( '|', '-', '-' ), '['),
# ']': (( '|', '-', '-' ), ']'),
# XXX not so ugly :(
'[': (( '[', '[', '[' ), '['),
']': (( ']', ']', ']' ), ']'),
'{': (( '|', '/', '\\', '<' ), '{'),
'}': (( '|', '\\', '/', '>' ), '}'),
'|': '|',
'<': (( '|', '/', '\\' ), '<'),
'>': (( '|', '\\', '/' ), '>'),
'int': ( ' | ', ' /', '/ ' ),
# horizontal objects
'-': '-',
'_': '_',
# diagonal objects '\' & '/' ?
'/': '/',
'\\': '\\',
}
def xobj(symb, length):
"""Construct spatial object of given length.
return: [] of equal-length strings
"""
if length <= 0:
raise ValueError("Length should be greater than 0")
# TODO robustify when no unicodedata is available
if _use_unicode:
_xobj = _xobj_unicode
else:
_xobj = _xobj_ascii
vinfo = _xobj[symb]
c1 = top = bot = mid = None
if not isinstance(vinfo, tuple): # 1 entry
ext = vinfo
else:
if isinstance(vinfo[0], tuple): # (vlong), c1
vlong = vinfo[0]
c1 = vinfo[1]
else: # just vlong, no c1
vlong = vinfo
ext = vlong[0]
try:
top = vlong[1]
bot = vlong[2]
mid = vlong[3]
except IndexError:
pass
if c1 is None:
c1 = ext
if top is None:
top = ext
if bot is None:
bot = ext
if mid is not None:
if (length % 2) == 0:
# even height, but we have to print it somehow anyway...
# XXX is it ok?
length += 1
else:
mid = ext
if length == 1:
return c1
res = []
next = (length - 2)//2
nmid = (length - 2) - next*2
res += [top]
res += [ext]*next
res += [mid]*nmid
res += [ext]*next
res += [bot]
return res
def vobj(symb, height):
"""Construct vertical object of a given height
see: xobj
"""
return '\n'.join( xobj(symb, height) )
def hobj(symb, width):
"""Construct horizontal object of a given width
see: xobj
"""
return ''.join( xobj(symb, width) )
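# Illustrative sketch: in unicode mode a 3-row left parenthesis is drawn from
# its upper hook, extension and lower hook pieces; in ASCII mode the same call
# falls back to '/', '|', '\'.
#
#     >>> _ = pretty_use_unicode(False)   # force the ASCII table
#     >>> print(vobj('(', 3))
#     /
#     |
#     \
#     >>> hobj('-', 4)
#     '----'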
# RADICAL
# n -> symbol
root = {
2: U('SQUARE ROOT'), # U('RADICAL SYMBOL BOTTOM')
3: U('CUBE ROOT'),
4: U('FOURTH ROOT'),
}
# RATIONAL
VF = lambda txt: U('VULGAR FRACTION %s' % txt)
# (p,q) -> symbol
frac = {
(1, 2): VF('ONE HALF'),
(1, 3): VF('ONE THIRD'),
(2, 3): VF('TWO THIRDS'),
(1, 4): VF('ONE QUARTER'),
(3, 4): VF('THREE QUARTERS'),
(1, 5): VF('ONE FIFTH'),
(2, 5): VF('TWO FIFTHS'),
(3, 5): VF('THREE FIFTHS'),
(4, 5): VF('FOUR FIFTHS'),
(1, 6): VF('ONE SIXTH'),
(5, 6): VF('FIVE SIXTHS'),
(1, 8): VF('ONE EIGHTH'),
(3, 8): VF('THREE EIGHTHS'),
(5, 8): VF('FIVE EIGHTHS'),
(7, 8): VF('SEVEN EIGHTHS'),
}
# atom symbols
_xsym = {
'==': ('=', '='),
'<': ('<', '<'),
'>': ('>', '>'),
'<=': ('<=', U('LESS-THAN OR EQUAL TO')),
'>=': ('>=', U('GREATER-THAN OR EQUAL TO')),
'!=': ('!=', U('NOT EQUAL TO')),
':=': (':=', ':='),
'*': ('*', U('DOT OPERATOR')),
'-->': ('-->', U('EM DASH') + U('EM DASH') +
U('BLACK RIGHT-POINTING TRIANGLE') if U('EM DASH')
and U('BLACK RIGHT-POINTING TRIANGLE') else None),
'==>': ('==>', U('BOX DRAWINGS DOUBLE HORIZONTAL') +
U('BOX DRAWINGS DOUBLE HORIZONTAL') +
U('BLACK RIGHT-POINTING TRIANGLE') if
U('BOX DRAWINGS DOUBLE HORIZONTAL') and
U('BOX DRAWINGS DOUBLE HORIZONTAL') and
U('BLACK RIGHT-POINTING TRIANGLE') else None),
'.': ('*', U('RING OPERATOR')),
}
def xsym(sym):
"""get symbology for a 'character'"""
op = _xsym[sym]
if _use_unicode:
return op[1]
else:
return op[0]
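# Illustrative sketch of xsym(): it picks the second entry of _xsym when
# unicode output is enabled and the first otherwise.  E.g. in ASCII mode:
#
#     >>> xsym('>=')
#     '>='
#
# With _use_unicode set, the same call would return the single
# GREATER-THAN OR EQUAL TO character instead.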
# SYMBOLS
atoms_table = {
# class how-to-display
'Exp1': U('SCRIPT SMALL E'),
'Pi': U('GREEK SMALL LETTER PI'),
'Infinity': U('INFINITY'),
'NegativeInfinity': U('INFINITY') and ('-' + U('INFINITY')), # XXX what to do here
#'ImaginaryUnit': U('GREEK SMALL LETTER IOTA'),
#'ImaginaryUnit': U('MATHEMATICAL ITALIC SMALL I'),
'ImaginaryUnit': U('DOUBLE-STRUCK ITALIC SMALL I'),
'EmptySet': U('EMPTY SET'),
'Naturals': U('DOUBLE-STRUCK CAPITAL N'),
'Naturals0': (U('DOUBLE-STRUCK CAPITAL N') and
(U('DOUBLE-STRUCK CAPITAL N') +
U('SUBSCRIPT ZERO'))),
'Integers': U('DOUBLE-STRUCK CAPITAL Z'),
'Reals': U('DOUBLE-STRUCK CAPITAL R'),
'Complexes': U('DOUBLE-STRUCK CAPITAL C'),
'Union': U('UNION'),
'SymmetricDifference': U('INCREMENT'),
'Intersection': U('INTERSECTION'),
'Ring': U('RING OPERATOR')
}
def pretty_atom(atom_name, default=None):
"""return pretty representation of an atom"""
if _use_unicode:
return atoms_table[atom_name]
else:
if default is not None:
return default
raise KeyError('only unicode') # send it default printer
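# Sketch of pretty_atom() behaviour (illustrative, not exhaustive): with
# unicode enabled, pretty_atom('Pi') yields the GREEK SMALL LETTER PI glyph;
# in ASCII mode the caller-supplied default is returned instead, e.g.
# pretty_atom('Pi', 'pi') -> 'pi', and a KeyError is raised when no default
# is given so that the plain default printer takes over.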
def pretty_symbol(symb_name):
"""return pretty representation of a symbol"""
# let's split symb_name into symbol + index
# UC: beta1
# UC: f_beta
if not _use_unicode:
return symb_name
name, sups, subs = split_super_sub(symb_name)
def translate(s) :
gG = greek_unicode.get(s)
if gG is not None:
return gG
for key in sorted(modifier_dict.keys(), key=lambda k:len(k), reverse=True) :
if s.lower().endswith(key) and len(s)>len(key):
return modifier_dict[key](translate(s[:-len(key)]))
return s
name = translate(name)
# Let's prettify sups/subs. If it fails at one of them, pretty sups/subs are
# not used at all.
def pretty_list(l, mapping):
result = []
for s in l:
pretty = mapping.get(s)
if pretty is None:
try: # match by separate characters
pretty = ''.join([mapping[c] for c in s])
except (TypeError, KeyError):
return None
result.append(pretty)
return result
pretty_sups = pretty_list(sups, sup)
if pretty_sups is not None:
pretty_subs = pretty_list(subs, sub)
else:
pretty_subs = None
# glue the results into one string
if pretty_subs is None: # nice formatting of sups/subs did not work
if subs:
name += '_'+'_'.join([translate(s) for s in subs])
if sups:
name += '__'+'__'.join([translate(s) for s in sups])
return name
else:
sups_result = ' '.join(pretty_sups)
subs_result = ' '.join(pretty_subs)
return ''.join([name, sups_result, subs_result])
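# A hedged example of pretty_symbol() (assuming unicode output and the
# greek/sub/sup tables defined elsewhere in this module): 'beta1' is split
# into name 'beta' and subscript '1'; the name is translated to its greek
# glyph and the subscript to the corresponding SUBSCRIPT digit, roughly
#
#     >>> pretty_symbol('beta1')   # hypothetical session
#     u'\N{GREEK SMALL LETTER BETA}\N{SUBSCRIPT ONE}'
#
# If a subscript/superscript cannot be mapped, the plain 'name_sub__sup'
# fallback built above is returned instead.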
def annotated(letter):
"""
Return a stylised drawing of the letter ``letter``, together with
information on how to put annotations (super- and subscripts to the
left and to the right) on it.
See pretty.py functions _print_meijerg, _print_hyper on how to use this
information.
"""
ucode_pics = {
'F': (2, 0, 2, 0, u('\N{BOX DRAWINGS LIGHT DOWN AND RIGHT}\N{BOX DRAWINGS LIGHT HORIZONTAL}\n'
'\N{BOX DRAWINGS LIGHT VERTICAL AND RIGHT}\N{BOX DRAWINGS LIGHT HORIZONTAL}\n'
'\N{BOX DRAWINGS LIGHT UP}')),
'G': (3, 0, 3, 1,
u('\N{BOX DRAWINGS LIGHT ARC DOWN AND RIGHT}\N{BOX DRAWINGS LIGHT HORIZONTAL}\N{BOX DRAWINGS LIGHT ARC DOWN AND LEFT}\n'
'\N{BOX DRAWINGS LIGHT VERTICAL}\N{BOX DRAWINGS LIGHT RIGHT}\N{BOX DRAWINGS LIGHT DOWN AND LEFT}\n'
'\N{BOX DRAWINGS LIGHT ARC UP AND RIGHT}\N{BOX DRAWINGS LIGHT HORIZONTAL}\N{BOX DRAWINGS LIGHT ARC UP AND LEFT}'))
}
ascii_pics = {
'F': (3, 0, 3, 0, ' _\n|_\n|\n'),
'G': (3, 0, 3, 1, ' __\n/__\n\_|')
}
if _use_unicode:
return ucode_pics[letter]
else:
return ascii_pics[letter]
|
|
import base64
import collections
import functools
import logging
import six
from six.moves import urllib
from . import cosmos
from .. import util
from ..errors import (DCOSAuthenticationException,
DCOSAuthorizationException, DCOSBadRequest,
DCOSConnectionError, DCOSException, DCOSHTTPException)
logger = logging.getLogger(__name__)
def cosmos_error(fn):
"""Decorator for errors returned from cosmos
:param fn: function to check for errors from cosmos
:type fn: function
:rtype: function
:returns: wrapped function that returns requests.Response or raises DCOSException
"""
@functools.wraps(fn)
def check_for_cosmos_error(*args, **kwargs):
"""Returns response from cosmos or raises exception
:returns: Response or raises Exception
:rtype: requests.Response
"""
error_media_type = 'application/vnd.dcos.package.error+json;' \
'charset=utf-8;version=v1'
response = fn(*args, **kwargs)
content_type = response.headers.get('Content-Type')
if content_type is None:
raise DCOSHTTPException(response)
elif error_media_type in content_type:
logger.debug("Error: {}".format(response.json()))
error_msg = _format_error_message(response.json())
raise DCOSException(error_msg)
return response
return check_for_cosmos_error
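# A minimal usage sketch of the decorator above (the helper name is
# hypothetical; the call mirrors the pattern used by _post() further down):
# any function returning a requests.Response from cosmos can be wrapped so
# that error payloads become DCOSException before the caller sees them.
#
#     @cosmos_error
#     def describe_package(cosmos, params):      # hypothetical helper
#         return cosmos.call_endpoint('package/describe', json=params)
#
# If cosmos answers with the package-error media type, the wrapper raises
# DCOSException(_format_error_message(response.json())) instead of
# returning the response.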
class PackageManager:
"""Implementation of Package Manager using Cosmos"""
def __init__(self, cosmos_url):
self.cosmos_url = cosmos_url
self.cosmos = cosmos.Cosmos(self.cosmos_url)
def has_capability(self, capability):
"""Check if cluster has a capability.
:param capability: capability name
:type capability: string
:return: True if the cluster has the capability, False otherwise
:rtype: bool
"""
if not self.enabled():
return False
try:
response = self.cosmos.call_endpoint(
'capabilities').json()
except DCOSAuthenticationException:
raise
except DCOSAuthorizationException:
raise
except DCOSConnectionError:
raise
except Exception as e:
logger.exception(e)
return False
if 'capabilities' not in response:
logger.error(
'Request to get cluster capabilities: {} '
'returned unexpected response: {}. '
'Missing "capabilities" field'.format(
urllib.parse.urljoin(self.cosmos_url, 'capabilities'),
response))
return False
return {'name': capability} in response['capabilities']
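# Sketch of the payload has_capability() expects from the 'capabilities'
# endpoint (field names taken from the checks above, values illustrative
# only):
#
#     {"capabilities": [{"name": "PACKAGE_MANAGEMENT"},
#                       {"name": "METRONOME"}]}
#
# has_capability('PACKAGE_MANAGEMENT') then evaluates
# {'name': 'PACKAGE_MANAGEMENT'} in response['capabilities'] -> True.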
def enabled(self):
"""Returns whether or not cosmos is enabled on specified dcos cluster
:rtype: bool
"""
return self.cosmos.enabled()
def install_app(self, pkg, options, app_id):
"""Installs a package's application
:param pkg: the package to install
:type pkg: CosmosPackageVersion
:param options: user supplied package parameters
:type options: dict
:param app_id: app ID for installation of this package
:type app_id: str
:rtype: None
"""
params = {"packageName": pkg.name(), "packageVersion": pkg.version()}
if options is not None:
params["options"] = options
if app_id is not None:
params["appId"] = app_id
self.cosmos_post("install", params)
def uninstall_app(self, package_name, remove_all, app_id):
"""Uninstalls an app.
:param package_name: The package to uninstall
:type package_name: str
:param remove_all: Whether to remove all instances of the named app
:type remove_all: boolean
:param app_id: App ID of the app instance to uninstall
:type app_id: str
:returns: whether uninstall was successful or not
:rtype: bool
"""
params = {"packageName": package_name}
if remove_all is True:
params["all"] = True
if app_id is not None:
params["appId"] = app_id
response = self.cosmos_post("uninstall", params)
results = response.json().get("results")
uninstalled_versions = []
for res in results:
version = res.get("packageVersion")
if version not in uninstalled_versions:
logger.info('Uninstalled package [%s] version [%s]', res.get("packageName"), res.get("packageVersion"))
uninstalled_versions += [res.get("packageVersion")]
if res.get("postUninstallNotes") is not None:
logger.info(res.get("postUninstallNotes"))
return True
def search_sources(self, query):
"""package search
:param query: query to search
:type query: str
:returns: list of package indices of matching packages
:rtype: [packages]
"""
response = self.cosmos_post("search", {"query": query})
return response.json()
def get_package_version(self, package_name, package_version):
"""Returns PackageVersion of specified package
:param package_name: package name
:type package_name: str
:param package_version: version of package
:type package_version: str | None
:rtype: PackageVersion
"""
return CosmosPackageVersion(package_name, package_version,
self.cosmos_url)
def installed_apps(self, package_name, app_id):
"""List installed packages
{
'appId': <appId>,
..<package.json properties>..
}
:param package_name: the optional package to list
:type package_name: str
:param app_id: the optional application id to list
:type app_id: str
:rtype: [dict]
"""
params = {}
if package_name is not None:
params["packageName"] = package_name
if app_id is not None:
params["appId"] = app_id
list_response = self.cosmos_post("list", params).json()
packages = []
for pkg in list_response['packages']:
result = pkg['packageInformation']['packageDefinition']
result['appId'] = pkg['appId']
packages.append(result)
return packages
def get_repos(self):
"""List locations of repos
:returns: the list of repos, in resolution order
:rtype: dict
"""
return self.cosmos_post("repository/list", params={}).json()
def add_repo(self, name, package_repo, index):
"""Add package repo and update repo with new repo
:param name: name to call repo
:type name: str
:param package_repo: location of repo to add
:type package_repo: str
:param index: index to add this repo
:type index: int
:returns: current repo list
:rtype: dict
"""
params = {"name": name, "uri": package_repo}
if index is not None:
params["index"] = index
response = self.cosmos_post("repository/add", params=params)
return response.json()
def remove_repo(self, name):
"""Remove package repo and update repo
:param name: name of repo to remove
:type name: str
:returns: current repo list
:rtype: dict
"""
params = {"name": name}
response = self.cosmos_post("repository/delete", params=params)
return response.json()
def package_add_local(self, dcos_package):
"""
Adds a locally stored DC/OS package to DC/OS
:param dcos_package: path to the DC/OS package
:type dcos_package: None | str
:return: Response to the package add request
:rtype: requests.Response
"""
try:
with util.open_file(dcos_package, 'rb') as pkg:
extra_headers = {
'Content-Type':
'application/vnd.dcos.'
'universe.package+zip;version=v1',
'X-Dcos-Content-MD5': util.md5_hash_file(pkg)
}
return self._post('add', headers=extra_headers, data=pkg)
except DCOSHTTPException as e:
if e.status() == 404:
message = 'Your version of DC/OS ' \
'does not support this operation'
raise DCOSException(message)
else:
raise e
def package_add_remote(self, package_name, package_version):
"""
Adds a remote DC/OS package to DC/OS
:param package_name: name of the remote package to add
:type package_name: None | str
:param package_version: version of the remote package to add
:type package_version: None | str
:return: Response to the package add request
:rtype: requests.Response
"""
try:
json = {'packageName': package_name}
if package_version is not None:
json['packageVersion'] = package_version
return self._post('add', params=json)
except DCOSHTTPException as e:
if e.status() == 404:
message = 'Your version of DC/OS ' \
'does not support this operation'
raise DCOSException(message)
else:
raise e
@cosmos_error
def _post(self, request, params=None, headers=None, data=None):
"""Request to cosmos server
:param request: type of request
:type request: str
:param params: body of request
:type params: dict
:param headers: additional headers for the request
:type headers: dict
:param data: a file object to upload as the request body
:type data: file
:returns: Response
:rtype: requests.Response
"""
endpoint = 'package/{}'.format(request)
try:
return self.cosmos.call_endpoint(
endpoint, headers, data=data, json=params)
except DCOSAuthenticationException:
raise
except DCOSAuthorizationException:
raise
except DCOSBadRequest as e:
return e.response
except DCOSHTTPException as e:
# let non authentication responses be handled by `cosmos_error` so
# we can expose errors reported by cosmos
return e.response
def cosmos_post(self, request, params):
"""Request to cosmos server
:param request: type of request
:type request: str
:param params: body of request
:type params: dict
:returns: Response
:rtype: requests.Response
"""
return self._post(request, params)
class CosmosPackageVersion():
"""Interface to a specific package version from cosmos"""
def __init__(self, name, package_version, url):
self._cosmos_url = url
params = {"packageName": name}
if package_version is not None:
params["packageVersion"] = package_version
response = PackageManager(url).cosmos_post("describe", params)
self._package_json = response.json()
self._content_type = response.headers['Content-Type']
def version(self):
"""Returns the package version.
:returns: The version of this package
:rtype: str
"""
return self.package_json()["version"]
def name(self):
"""Returns the package name.
:returns: The name of this package
:rtype: str
"""
return self.package_json()["name"]
def package_json(self):
"""Returns the JSON content of the package definition.
:returns: Package data
:rtype: dict
"""
if 'version=v2' in self._content_type:
return self._package_json
else:
return self._package_json["package"]
def package_response(self):
"""Returns the JSON content of the describe response.
:returns: Package data
:rtype: dict
"""
return self._package_json
def config_json(self):
"""Returns the JSON content of the config.json file.
:returns: Package config schema
:rtype: dict | None
"""
return self.package_json().get("config")
def resource_json(self):
"""Returns the JSON content of the resource.json file.
:returns: Package resources
:rtype: dict | None
"""
return self.package_json().get("resource")
def marathon_template(self):
"""Returns raw data from marathon.json
:returns: raw data from marathon.json
:rtype: str | None
"""
template = self.package_json().get("marathon", {}).get(
"v2AppMustacheTemplate"
)
return base64.b64decode(template) if template else None
def marathon_json(self, options):
"""Returns the JSON content of the marathon.json template, after
rendering it with options.
:param options: the template options to use in rendering
:type options: dict
:rtype: dict
"""
params = {
"packageName": self.name(),
"packageVersion": self.version()
}
if options:
params["options"] = options
response = PackageManager(
self._cosmos_url
).cosmos_post("render", params)
return response.json().get("marathonJson")
def options(self, user_options):
"""Makes sure user supplied options are valid
:param user_options: the template options to use in rendering
:type user_options: dict
:rtype: None
"""
self.marathon_json(user_options)
return None
def cli_definition(self):
"""Returns the JSON content that defines a cli subcommand. Looks for
"cli" property in resource.json first and if that is None, checks for
command.json
:returns: Package data
:rtype: dict | None
"""
return (self.resource_json() and self.resource_json().get("cli")) or (
self.command_json()
)
def command_json(self):
"""Returns the JSON content of the command.json file.
:returns: Package data
:rtype: dict | None
"""
return self.package_json().get("command")
def package_versions(self):
"""Returns a list of available versions for this package
:returns: available package versions, newest release first
:rtype: [str]
"""
params = {"packageName": self.name(), "includePackageVersions": True}
response = PackageManager(self._cosmos_url).cosmos_post(
"list-versions", params)
return list(
version for (version, releaseVersion) in
sorted(
response.json().get("results").items(),
key=lambda item: int(item[1]), # release version
reverse=True
)
)
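# Illustrative sketch of CosmosPackageVersion.package_versions(): cosmos
# returns a mapping of package version -> release version, and the method
# orders by the (integer) release version, newest first.  E.g. a response
# body of
#
#     {"results": {"1.0.0": "0", "1.1.0": "2", "1.0.1": "1"}}
#
# would produce ["1.1.0", "1.0.1", "1.0.0"].  (Values are made up for the
# example.)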
def _format_error_message(error):
"""Returns formatted error message based on error type
:param error: cosmos error
:type error: dict
:returns: formatted error
:rtype: str
"""
if error.get("type") == "AmbiguousAppId":
helper = (".\nPlease use --app-id to specify the ID of the app "
"to uninstall, or use --all to uninstall all apps.")
error_message = error.get("message") + helper
elif error.get("type") == "MultipleFrameworkIds":
helper = ". Manually shut them down using 'dcos service shutdown'"
error_message = error.get("message") + helper
elif error.get("type") == "JsonSchemaMismatch":
error_message = _format_json_schema_mismatch_message(error)
elif error.get("type") == "MarathonBadResponse":
error_message = _format_marathon_bad_response_message(error)
elif error.get('type') == 'NotImplemented':
error_message = 'DC/OS has not been ' \
'configured to support this operation'
else:
error_message = error.get("message")
return error_message
def _format_json_schema_mismatch_message(error):
"""Returns the formatted error message for JsonSchemaMismatch
:param error: cosmos JsonSchemaMismatch error
:type error: dict
:returns: formatted error
:rtype: str
"""
error_messages = ["Error: {}".format(error.get("message"))]
for err in error.get("data").get("errors"):
if err.get("unwanted"):
reason = "Unexpected properties: {}".format(err["unwanted"])
error_messages += [reason]
if err.get("found"):
found = "Found: {}".format(err["found"])
error_messages += [found]
if err.get("minimum"):
found = "Required minimum: {}".format(err["minimum"])
error_messages += [found]
if err.get("expected"):
expected = "Expected: {}".format(",".join(err["expected"]))
error_messages += [expected]
if err.get("missing"):
missing = "Required parameter missing: {}".format(
",".join(err["missing"]),
)
error_messages += [missing]
if err.get("instance"):
pointer = err["instance"].get("pointer")
formatted_path = pointer.lstrip("/").replace("/", ".")
path = "Path: {}".format(formatted_path)
error_messages += [path]
error_messages += [
"\nPlease create a JSON file with the appropriate options, and"
" pass the /path/to/file as an --options argument."
]
return "\n".join(error_messages)
def _format_marathon_bad_response_message(error):
data = error.get("data")
error_messages = [error.get("message")]
if data is not None:
for err in data.get("errors"):
if err.get("error") and isinstance(err["error"], six.string_types):
error_messages += [err["error"]]
elif err.get("errors") and \
isinstance(err["errors"], collections.Sequence):
error_messages += err["errors"]
return "\n".join(error_messages)
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from contrail_openstack_dashboard.openstack_dashboard.dashboards.project.l3routers.extensions.routerrules\
import rulemanager
from openstack_dashboard.test import helpers as test
class RouterTests(test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:l3routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:l3routers:detail' % DASHBOARD
def _mock_external_network_list(self, alter_ids=False):
search_opts = {'router:external': True}
ext_nets = [n for n in self.networks.list() if n['router:external']]
if alter_ids:
for ext_net in ext_nets:
ext_net.id += 'some extra garbage'
api.neutron.network_list(
IsA(http.HttpRequest),
**search_opts).AndReturn(ext_nets)
def _mock_external_network_get(self, router):
ext_net_id = router.external_gateway_info['network_id']
ext_net = self.networks.list()[2]
api.neutron.network_get(IsA(http.HttpRequest), ext_net_id,
expand_subnet=False).AndReturn(ext_net)
@test.create_stubs({api.neutron: ('router_list', 'network_list')})
def test_index(self):
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn(self.routers.list())
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/l3routers/index.html' % self.DASHBOARD)
routers = res.context['table'].data
self.assertItemsEqual(routers, self.routers.list())
@test.create_stubs({api.neutron: ('router_list', 'network_list')})
def test_index_router_list_exception(self):
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndRaise(self.exceptions.neutron)
self._mock_external_network_list()
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
self.assertTemplateUsed(res, '%s/l3routers/index.html' % self.DASHBOARD)
self.assertEqual(len(res.context['table'].data), 0)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('router_list', 'network_list')})
def test_set_external_network_empty(self):
router = self.routers.first()
api.neutron.router_list(
IsA(http.HttpRequest),
tenant_id=self.tenant.id,
search_opts=None).AndReturn([router])
self._mock_external_network_list(alter_ids=True)
self.mox.ReplayAll()
res = self.client.get(self.INDEX_URL)
table_data = res.context['table'].data
self.assertEqual(len(table_data), 1)
self.assertIn('(Not Found)',
table_data[0]['external_gateway_info']['network'])
self.assertTemplateUsed(res, '%s/l3routers/index.html' % self.DASHBOARD)
self.assertMessageCount(res, error=1)
@test.create_stubs({api.neutron: ('router_get', 'port_list',
'network_get')})
def test_router_detail(self):
router = self.routers.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(self.routers.first())
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(router)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':l3routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertTemplateUsed(res, '%s/l3routers/detail.html' % self.DASHBOARD)
ports = res.context['interfaces_table'].data
self.assertItemsEqual(ports, [self.ports.first()])
@test.create_stubs({api.neutron: ('router_get',)})
def test_router_detail_exception(self):
router = self.routers.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':l3routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertRedirectsNoFollow(res, self.INDEX_URL)
class RouterActionTests(test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:l3routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:l3routers:detail' % DASHBOARD
@test.create_stubs({api.neutron: ('router_create',
'get_dvr_permission',)})
def test_router_create_post(self):
router = self.routers.first()
api.neutron.get_dvr_permission(IsA(http.HttpRequest), "create")\
.AndReturn(False)
api.neutron.router_create(IsA(http.HttpRequest), name=router.name)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name}
url = reverse('horizon:%s:l3routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_dvr_permission',)})
def test_router_create_post_mode_server_default(self):
router = self.routers.first()
api.neutron.get_dvr_permission(IsA(http.HttpRequest), "create")\
.AndReturn(True)
api.neutron.router_create(IsA(http.HttpRequest), name=router.name)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'server_default'}
url = reverse('horizon:%s:l3routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_dvr_permission',)})
def test_dvr_router_create_post(self):
router = self.routers.first()
api.neutron.get_dvr_permission(IsA(http.HttpRequest), "create")\
.MultipleTimes().AndReturn(True)
param = {'name': router.name,
'distributed': True}
api.neutron.router_create(IsA(http.HttpRequest), **param)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'name': router.name,
'mode': 'distributed'}
url = reverse('horizon:%s:l3routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_dvr_permission',)})
def test_router_create_post_exception_error_case_409(self):
router = self.routers.first()
api.neutron.get_dvr_permission(IsA(http.HttpRequest), "create")\
.MultipleTimes().AndReturn(False)
self.exceptions.neutron.status_code = 409
api.neutron.router_create(IsA(http.HttpRequest), name=router.name)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name}
url = reverse('horizon:%s:l3routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_create',
'get_dvr_permission',)})
def test_router_create_post_exception_error_case_non_409(self):
router = self.routers.first()
api.neutron.get_dvr_permission(IsA(http.HttpRequest), "create")\
.MultipleTimes().AndReturn(False)
self.exceptions.neutron.status_code = 999
api.neutron.router_create(IsA(http.HttpRequest), name=router.name)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'name': router.name}
url = reverse('horizon:%s:l3routers:create' % self.DASHBOARD)
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'get_dvr_permission')})
def _test_router_update_get(self, dvr_enabled=False,
current_dvr=False):
router = [r for r in self.routers.list()
if r.distributed == current_dvr][0]
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
api.neutron.get_dvr_permission(IsA(http.HttpRequest), "update")\
.AndReturn(dvr_enabled)
self.mox.ReplayAll()
url = reverse('horizon:%s:l3routers:update' % self.DASHBOARD,
args=[router.id])
return self.client.get(url)
def test_router_update_get_dvr_disabled(self):
res = self._test_router_update_get(dvr_enabled=False)
self.assertTemplateUsed(res, 'project/l3routers/update.html')
self.assertNotContains(res, 'Router Type')
self.assertNotContains(res, 'id="id_mode"')
def test_router_update_get_dvr_enabled_mode_centralized(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=False)
self.assertTemplateUsed(res, 'project/l3routers/update.html')
self.assertContains(res, 'Router Type')
# Check both menu are displayed.
self.assertContains(
res,
'<option value="centralized" selected="selected">'
'Centralized</option>',
html=True)
self.assertContains(
res,
'<option value="distributed">Distributed</option>',
html=True)
def test_router_update_get_dvr_enabled_mode_distributed(self):
res = self._test_router_update_get(dvr_enabled=True, current_dvr=True)
self.assertTemplateUsed(res, 'project/l3routers/update.html')
self.assertContains(res, 'Router Type')
self.assertContains(
res,
'<input class=" form-control" id="id_mode" name="mode" '
'readonly="readonly" type="text" value="distributed" />',
html=True)
self.assertNotContains(res, 'centralized')
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_dvr_permission')})
def test_router_update_post_dvr_disabled(self):
router = self.routers.first()
api.neutron.get_dvr_permission(IsA(http.HttpRequest), "update")\
.AndReturn(False)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up)\
.AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up}
url = reverse('horizon:%s:l3routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
@test.create_stubs({api.neutron: ('router_get',
'router_update',
'get_dvr_permission')})
def test_router_update_post_dvr_enabled(self):
router = self.routers.first()
api.neutron.get_dvr_permission(IsA(http.HttpRequest), "update")\
.AndReturn(True)
api.neutron.router_update(IsA(http.HttpRequest), router.id,
name=router.name,
admin_state_up=router.admin_state_up,
distributed=True)\
.AndReturn(router)
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'name': router.name,
'admin_state': router.admin_state_up,
'mode': 'distributed'}
url = reverse('horizon:%s:l3routers:update' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, self.INDEX_URL)
def _mock_network_list(self, tenant_id):
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=tenant_id).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
def _test_router_addinterface(self, raise_error=False):
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
add_interface = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, subnet_id=subnet.id)
if raise_error:
add_interface.AndRaise(self.exceptions.neutron)
else:
add_interface.AndReturn({'subnet_id': subnet.id,
'port_id': port.id})
api.neutron.port_get(IsA(http.HttpRequest), port.id)\
.AndReturn(port)
self._check_router_addinterface(router, subnet)
def _check_router_addinterface(self, router, subnet, ip_address=''):
# mock APIs used to show router detail
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(router)
self._mock_network_list(router['tenant_id'])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'subnet_id': subnet.id,
'ip_address': ip_address}
url = reverse('horizon:%s:l3routers:addinterface' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'port_get',
'network_list')})
def test_router_addinterface(self):
self._test_router_addinterface()
@test.create_stubs({api.neutron: ('router_get',
'router_add_interface',
'network_list')})
def test_router_addinterface_exception(self):
self._test_router_addinterface(raise_error=True)
def _test_router_addinterface_ip_addr(self, errors=[]):
router = self.routers.first()
subnet = self.subnets.first()
port = self.ports.first()
ip_addr = port['fixed_ips'][0]['ip_address']
self._setup_mock_addinterface_ip_addr(router, subnet, port,
ip_addr, errors)
self._check_router_addinterface(router, subnet, ip_addr)
def _setup_mock_addinterface_ip_addr(self, router, subnet, port,
ip_addr, errors=[]):
subnet_get = api.neutron.subnet_get(IsA(http.HttpRequest), subnet.id)
if 'subnet_get' in errors:
subnet_get.AndRaise(self.exceptions.neutron)
return
subnet_get.AndReturn(subnet)
params = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_addr}]}
port_create = api.neutron.port_create(IsA(http.HttpRequest), **params)
if 'port_create' in errors:
port_create.AndRaise(self.exceptions.neutron)
return
port_create.AndReturn(port)
add_inf = api.neutron.router_add_interface(
IsA(http.HttpRequest), router.id, port_id=port.id)
if 'add_interface' not in errors:
return
add_inf.AndRaise(self.exceptions.neutron)
port_delete = api.neutron.port_delete(IsA(http.HttpRequest), port.id)
if 'port_delete' in errors:
port_delete.AndRaise(self.exceptions.neutron)
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr(self):
self._test_router_addinterface_ip_addr()
@test.create_stubs({api.neutron: ('subnet_get',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr_exception_subnet_get(self):
self._test_router_addinterface_ip_addr(errors=['subnet_get'])
@test.create_stubs({api.neutron: ('subnet_get', 'port_create',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr_exception_port_create(self):
self._test_router_addinterface_ip_addr(errors=['port_create'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr_exception_add_interface(self):
self._test_router_addinterface_ip_addr(errors=['add_interface'])
@test.create_stubs({api.neutron: ('router_add_interface', 'subnet_get',
'port_create', 'port_delete',
'router_get', 'network_list')})
def test_router_addinterface_ip_addr_exception_port_delete(self):
self._test_router_addinterface_ip_addr(errors=['add_interface',
'port_delete'])
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndReturn(None)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:l3routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_add_gateway',
'network_list')})
def test_router_add_gateway_exception(self):
router = self.routers.first()
network = self.networks.first()
api.neutron.router_add_gateway(
IsA(http.HttpRequest),
router.id,
network.id).AndRaise(self.exceptions.neutron)
api.neutron.router_get(
IsA(http.HttpRequest), router.id).AndReturn(router)
search_opts = {'router:external': True}
api.neutron.network_list(
IsA(http.HttpRequest), **search_opts).AndReturn([network])
self.mox.ReplayAll()
form_data = {'router_id': router.id,
'router_name': router.name,
'network_id': network.id}
url = reverse('horizon:%s:l3routers:setgateway' % self.DASHBOARD,
args=[router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = self.INDEX_URL
self.assertRedirectsNoFollow(res, detail_url)
class RouterRuleTests(test.TestCase):
DASHBOARD = 'project'
INDEX_URL = reverse('horizon:%s:l3routers:index' % DASHBOARD)
DETAIL_PATH = 'horizon:%s:l3routers:detail' % DASHBOARD
def _mock_external_network_get(self, router):
ext_net_id = router.external_gateway_info['network_id']
ext_net = self.networks.list()[2]
api.neutron.network_get(IsA(http.HttpRequest), ext_net_id,
expand_subnet=False).AndReturn(ext_net)
def _mock_network_list(self, tenant_id):
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=tenant_id).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
@test.create_stubs({api.neutron: ('router_get', 'port_list',
'network_get')})
def test_extension_hides_without_rules(self):
router = self.routers.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(self.routers.first())
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(router)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':l3routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertTemplateUsed(res, '%s/l3routers/detail.html' % self.DASHBOARD)
self.assertTemplateNotUsed(res,
'%s/l3routers/extensions/routerrules/grid.html' % self.DASHBOARD)
@test.create_stubs({api.neutron: ('router_get', 'port_list',
'network_get', 'network_list')})
def test_routerrule_detail(self):
router = self.routers_with_rules.first()
api.neutron.router_get(IsA(http.HttpRequest), router.id)\
.AndReturn(self.routers_with_rules.first())
api.neutron.port_list(IsA(http.HttpRequest),
device_id=router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(router)
if self.DASHBOARD == 'project':
api.neutron.network_list(
IsA(http.HttpRequest),
shared=False,
tenant_id=router['tenant_id']).AndReturn(self.networks.list())
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:%s'
':l3routers:detail' % self.DASHBOARD,
args=[router.id]))
self.assertTemplateUsed(res, '%s/l3routers/detail.html' % self.DASHBOARD)
if self.DASHBOARD == 'project':
self.assertTemplateUsed(res,
'%s/l3routers/extensions/routerrules/grid.html' % self.DASHBOARD)
rules = res.context['routerrules_table'].data
self.assertItemsEqual(rules, router['router_rules'])
def _test_router_addrouterrule(self, raise_error=False):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
rule = {'source': '1.2.3.4/32', 'destination': '4.3.2.1/32', 'id': 99,
'action': 'permit', 'nexthops': ['1.1.1.1', '2.2.2.2']}
post_router['router_rules'].insert(0, rule)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
if raise_error:
router_update.AndRaise(self.exceptions.neutron)
else:
router_update.AndReturn({'router': post_router})
self.mox.ReplayAll()
form_data = {'router_id': pre_router.id,
'source': rule['source'],
'destination': rule['destination'],
'action': rule['action'],
'nexthops': ','.join(rule['nexthops'])}
url = reverse('horizon:%s:l3routers:addrouterrule' % self.DASHBOARD,
args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
detail_url = reverse(self.DETAIL_PATH, args=[pre_router.id])
self.assertRedirectsNoFollow(res, detail_url)
@test.create_stubs({api.neutron: ('router_get',
'router_update')})
def test_router_addrouterrule(self):
self._test_router_addrouterrule()
@test.create_stubs({api.neutron: ('router_get',
'router_update')})
def test_router_addrouterrule_exception(self):
self._test_router_addrouterrule(raise_error=True)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'port_list', 'network_get')})
def test_router_removerouterrule(self):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
rule = post_router['router_rules'].pop()
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(pre_router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self.mox.ReplayAll()
form_rule_id = rule['source'] + rule['destination']
form_data = {'router_id': pre_router.id,
'action': 'routerrules__delete__%s' % form_rule_id}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
@test.create_stubs({api.neutron: ('router_get', 'router_update',
'network_list', 'port_list',
'network_get')})
def test_router_resetrouterrules(self):
pre_router = self.routers_with_rules.first()
post_router = copy.deepcopy(pre_router)
default_rules = [{'source': 'any', 'destination': 'any',
'action': 'permit', 'nexthops': [], 'id': '2'}]
del post_router['router_rules'][:]
post_router['router_rules'].extend(default_rules)
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
params = {}
params['router_rules'] = rulemanager.format_for_api(
post_router['router_rules'])
router_update = api.neutron.router_update(IsA(http.HttpRequest),
pre_router.id, **params)
router_update.AndReturn({'router': post_router})
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
api.neutron.port_list(IsA(http.HttpRequest),
device_id=pre_router.id)\
.AndReturn([self.ports.first()])
self._mock_external_network_get(pre_router)
self._mock_network_list(pre_router['tenant_id'])
api.neutron.router_get(IsA(http.HttpRequest),
pre_router.id).AndReturn(post_router)
self.mox.ReplayAll()
form_data = {'router_id': pre_router.id,
'action': 'routerrules__resetrules'}
url = reverse(self.DETAIL_PATH, args=[pre_router.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
|
|
"""
This module defines the database models for all in-game objects, that
is, all objects that have an actual existence in-game.
Each database object is 'decorated' with a 'typeclass', a normal
python class that implements all the various logics needed by the game
in question. Objects created of this class transparently communicate
with its related database object for storing all attributes. The
admin should usually not have to deal directly with this database
object layer.
Attributes are separate objects that store values persistently onto
the database object. Like everything else, they can be accessed
transparently through the decorating TypeClass.
"""
from builtins import object
from django.conf import settings
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from evennia.typeclasses.models import TypedObject
from evennia.objects.manager import ObjectDBManager
from evennia.utils import logger
from evennia.utils.utils import (make_iter, dbref, lazy_property)
class ContentsHandler(object):
"""
Handles and caches the contents of an object to avoid excessive
lookups (this is done very often due to cmdhandler needing to look
for object-cmdsets). It is stored on the 'contents_cache' property
of the ObjectDB.
"""
def __init__(self, obj):
"""
Sets up the contents handler.
Args:
obj (Object): The object on which the
handler is defined
"""
self.obj = obj
self._pkcache = {}
self._idcache = obj.__class__.__instance_cache__
self.init()
def init(self):
"""
Re-initialize the content cache
"""
self._pkcache.update(dict((obj.pk, None) for obj in
ObjectDB.objects.filter(db_location=self.obj) if obj.pk))
def get(self, exclude=None):
"""
Return the contents of the cache.
Args:
exclude (Object or list of Object): object(s) to ignore
Returns:
objects (list): the Objects inside this location
"""
if exclude:
pks = [pk for pk in self._pkcache if pk not in [excl.pk for excl in make_iter(exclude)]]
else:
pks = self._pkcache
try:
return [self._idcache[pk] for pk in pks]
except KeyError:
# this can happen if the idmapper cache was cleared for an object
# in the contents cache. If so we need to re-initialize and try again.
self.init()
try:
return [self._idcache[pk] for pk in pks]
except KeyError:
# this means an actual failure of caching. Return real database match.
logger.log_err("contents cache failed for %s." % (self.obj.key))
return list(ObjectDB.objects.filter(db_location=self.obj))
def add(self, obj):
"""
Add a new object to this location
Args:
obj (Object): object to add
"""
self._pkcache[obj.pk] = None
def remove(self, obj):
"""
Remove object from this location
Args:
obj (Object): object to remove
"""
self._pkcache.pop(obj.pk, None)
def clear(self):
"""
Clear the contents cache and re-initialize
"""
self._pkcache = {}
self.init()
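# Rough sketch of how the handler above is driven by the location setter
# defined further down in this module: when an object moves, the old
# location's cache drops it and the new one picks it up, so a location's
# contents can be answered without a database query.
#
#     old_location.contents_cache.remove(obj)       # illustrative only
#     new_location.contents_cache.add(obj)
#     new_location.contents_cache.get(exclude=obj)  # contents minus obj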
#------------------------------------------------------------
#
# ObjectDB
#
#------------------------------------------------------------
class ObjectDB(TypedObject):
"""
All objects in the game use the ObjectDB model to store
data in the database. This is handled transparently through
the typeclass system.
Note that the base objectdb is very simple, with
few defined fields. Use attributes to extend your
type class with new database-stored variables.
The TypedObject supplies the following (inherited) properties:
- key - main name
- name - alias for key
- db_typeclass_path - the path to the decorating typeclass
- db_date_created - time stamp of object creation
- permissions - perm strings
- locks - lock definitions (handler)
- dbref - #id of object
- db - persistent attribute storage
- ndb - non-persistent attribute storage
The ObjectDB adds the following properties:
- player - optional connected player (always together with sessid)
- sessid - optional connection session id (always together with player)
- location - in-game location of object
- home - safety location for object (handler)
- scripts - scripts assigned to object (handler from typeclass)
- cmdset - active cmdset on object (handler from typeclass)
- aliases - aliases for this object (property)
- nicks - nicknames for *other* things in Evennia (handler)
- sessions - sessions connected to this object (see also player)
- has_player - bool if an active player is currently connected
- contents - other objects having this object as location
- exits - exits from this object
"""
#
# ObjectDB Database model setup
#
#
# inherited fields (from TypedObject):
# db_key (also 'name' works), db_typeclass_path, db_date_created,
# db_permissions
#
# These database fields (including the inherited ones) should normally be
# managed by their corresponding wrapper properties, named same as the
# field, but without the db_* prefix (e.g. the db_key field is set with
# self.key instead). The wrappers are created at the metaclass level and
# will automatically save and cache the data more efficiently.
# If this is a character object, the player is connected here.
db_player = models.ForeignKey("players.PlayerDB", null=True, verbose_name='player', on_delete=models.SET_NULL,
help_text='a Player connected to this object, if any.')
# the session id associated with this player, if any
db_sessid = models.CommaSeparatedIntegerField(null=True, max_length=32, verbose_name="session id",
help_text="csv list of session ids of connected Player, if any.")
# The location in the game world. Since this one is likely
# to change often, we set this with the 'location' property
# to transparently handle Typeclassing.
db_location = models.ForeignKey('self', related_name="locations_set", db_index=True, on_delete=models.SET_NULL,
blank=True, null=True, verbose_name='game location')
# a safety location; this usually doesn't change much.
db_home = models.ForeignKey('self', related_name="homes_set", on_delete=models.SET_NULL,
blank=True, null=True, verbose_name='home location')
# destination of this object - primarily used by exits.
db_destination = models.ForeignKey('self', related_name="destinations_set", db_index=True, on_delete=models.SET_NULL,
blank=True, null=True, verbose_name='destination',
help_text='a destination, used only by exit objects.')
# database storage of persistent cmdsets.
db_cmdset_storage = models.CharField('cmdset', max_length=255, null=True, blank=True,
help_text="optional python path to a cmdset class.")
# Database manager
objects = ObjectDBManager()
# defaults
__settingsclasspath__ = settings.BASE_OBJECT_TYPECLASS
__defaultclasspath__ = "evennia.objects.objects.DefaultObject"
__applabel__ = "objects"
@lazy_property
def contents_cache(self):
return ContentsHandler(self)
# cmdset_storage property handling
def __cmdset_storage_get(self):
"getter"
storage = self.db_cmdset_storage
return [path.strip() for path in storage.split(',')] if storage else []
def __cmdset_storage_set(self, value):
"setter"
self.db_cmdset_storage = ",".join(str(val).strip() for val in make_iter(value))
self.save(update_fields=["db_cmdset_storage"])
def __cmdset_storage_del(self):
"deleter"
self.db_cmdset_storage = None
self.save(update_fields=["db_cmdset_storage"])
cmdset_storage = property(__cmdset_storage_get, __cmdset_storage_set, __cmdset_storage_del)
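# Sketch of the cmdset_storage round-trip (the path below is made up):
# assigning a list or a single path stores a comma-separated string in
# db_cmdset_storage, and reading it back always yields a list, e.g.
#     obj.cmdset_storage = ["commands.default.CharacterCmdSet"]
#     obj.db_cmdset_storage   -> "commands.default.CharacterCmdSet"
#     obj.cmdset_storage      -> ["commands.default.CharacterCmdSet"]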
# location getsetter
def __location_get(self):
"Get location"
return self.db_location
def __location_set(self, location):
"Set location, checking for loops and allowing dbref"
if isinstance(location, (basestring, int)):
# allow setting of #dbref
dbid = dbref(location, reqhash=False)
if dbid:
try:
location = ObjectDB.objects.get(id=dbid)
except ObjectDoesNotExist:
# maybe it is just a name that happens to look like a dbid
pass
try:
def is_loc_loop(loc, depth=0):
"Recursively traverse target location, trying to catch a loop."
if depth > 10:
return
elif loc == self:
raise RuntimeError
elif loc == None:
raise RuntimeWarning
return is_loc_loop(loc.db_location, depth + 1)
try:
is_loc_loop(location)
except RuntimeWarning:
pass
# if we get to this point we are ready to change location
old_location = self.db_location
# this is checked in at_db_location_postsave below
self._safe_contents_update = True
# actually set the field (this will error if location is invalid)
self.db_location = location
self.save(update_fields=["db_location"])
# remove the safe flag
del self._safe_contents_update
# update the contents cache
if old_location:
old_location.contents_cache.remove(self)
if self.db_location:
self.db_location.contents_cache.add(self)
except RuntimeError:
errmsg = "Error: %s.location = %s creates a location loop." % (self.key, location)
logger.log_trace(errmsg)
raise
except Exception as e:
errmsg = "Error (%s): %s is not a valid location." % (str(e), location)
logger.log_trace(errmsg)
raise
def __location_del(self):
"Cleanly delete the location reference"
self.db_location = None
self.save(update_fields=["db_location"])
location = property(__location_get, __location_set, __location_del)
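# Illustrative note on the setter above: assigning either an Object or a
# "#dbref" string works, e.g. obj.location = "#2" resolves the dbref
# first.  Assigning something that would make the object contain itself
# (obj.location = obj, directly or through a chain of locations) trips
# the is_loc_loop() check and raises RuntimeError.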
def at_db_location_postsave(self, new):
"""
This is called automatically after the location field was
saved, no matter how. It checks for a variable
_safe_contents_update to know if the save was triggered via
the location handler (which updates the contents cache) or
not.
Args:
new (bool): Set if this location has not yet been saved before.
"""
if not hasattr(self, "_safe_contents_update"):
# changed/set outside of the location handler
if new:
# if new, there is no previous location to worry about
if self.db_location:
self.db_location.contents_cache.add(self)
else:
# Since we cannot know at this point what old_location was, we
# trigger a full-on contents_cache update here.
logger.log_warn("db_location direct save triggered contents_cache.init() for all objects!")
[o.contents_cache.init() for o in self.__dbclass__.get_all_cached_instances()]
class Meta(object):
"Define Django meta options"
verbose_name = "Object"
verbose_name_plural = "Objects"
|
|
# -*- coding: utf-8 -*-
from flexmock import flexmock, flexmock_teardown
from orator.connections import Connection
from orator.schema.grammars import SQLiteSchemaGrammar
from orator.schema.blueprint import Blueprint
from ... import OratorTestCase
class SqliteSchemaGrammarTestCase(OratorTestCase):
def tearDown(self):
flexmock_teardown()
def test_basic_create(self):
blueprint = Blueprint("users")
blueprint.create()
blueprint.increments("id")
blueprint.string("email")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'CREATE TABLE "users" ("id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, "email" VARCHAR NOT NULL)',
statements[0],
)
blueprint = Blueprint("users")
blueprint.increments("id")
blueprint.string("email")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(2, len(statements))
expected = [
'ALTER TABLE "users" ADD COLUMN "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT',
'ALTER TABLE "users" ADD COLUMN "email" VARCHAR NOT NULL',
]
self.assertEqual(expected, statements)
def test_drop_table(self):
blueprint = Blueprint("users")
blueprint.drop()
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual('DROP TABLE "users"', statements[0])
def test_drop_table_if_exists(self):
blueprint = Blueprint("users")
blueprint.drop_if_exists()
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual('DROP TABLE IF EXISTS "users"', statements[0])
def test_drop_unique(self):
blueprint = Blueprint("users")
blueprint.drop_unique("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual("DROP INDEX foo", statements[0])
def test_drop_index(self):
blueprint = Blueprint("users")
blueprint.drop_index("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual("DROP INDEX foo", statements[0])
def test_rename_table(self):
blueprint = Blueprint("users")
blueprint.rename("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual('ALTER TABLE "users" RENAME TO "foo"', statements[0])
def test_adding_foreign_key(self):
blueprint = Blueprint("users")
blueprint.create()
blueprint.string("foo").primary()
blueprint.string("order_id")
blueprint.foreign("order_id").references("id").on("orders")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
expected = (
'CREATE TABLE "users" ("foo" VARCHAR NOT NULL, "order_id" VARCHAR NOT NULL, '
'FOREIGN KEY("order_id") REFERENCES "orders"("id"), PRIMARY KEY ("foo"))'
)
self.assertEqual(expected, statements[0])
def test_adding_unique_key(self):
blueprint = Blueprint("users")
blueprint.unique("foo", "bar")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual('CREATE UNIQUE INDEX bar ON "users" ("foo")', statements[0])
def test_adding_index(self):
blueprint = Blueprint("users")
blueprint.index("foo", "bar")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual('CREATE INDEX bar ON "users" ("foo")', statements[0])
def test_adding_incrementing_id(self):
blueprint = Blueprint("users")
blueprint.increments("id")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT',
statements[0],
)
def test_adding_big_incrementing_id(self):
blueprint = Blueprint("users")
blueprint.big_increments("id")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT',
statements[0],
)
def test_adding_string(self):
blueprint = Blueprint("users")
blueprint.string("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" VARCHAR NOT NULL', statements[0]
)
blueprint = Blueprint("users")
blueprint.string("foo", 100)
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" VARCHAR NOT NULL', statements[0]
)
blueprint = Blueprint("users")
blueprint.string("foo", 100).nullable().default("bar")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" VARCHAR NULL DEFAULT \'bar\'',
statements[0],
)
def test_adding_text(self):
blueprint = Blueprint("users")
blueprint.text("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" TEXT NOT NULL', statements[0]
)
def test_adding_big_integer(self):
blueprint = Blueprint("users")
blueprint.big_integer("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" INTEGER NOT NULL', statements[0]
)
blueprint = Blueprint("users")
blueprint.big_integer("foo", True)
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT',
statements[0],
)
def test_adding_integer(self):
blueprint = Blueprint("users")
blueprint.integer("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" INTEGER NOT NULL', statements[0]
)
blueprint = Blueprint("users")
blueprint.integer("foo", True)
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT',
statements[0],
)
def test_adding_medium_integer(self):
blueprint = Blueprint("users")
blueprint.integer("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" INTEGER NOT NULL', statements[0]
)
def test_adding_tiny_integer(self):
blueprint = Blueprint("users")
blueprint.integer("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" INTEGER NOT NULL', statements[0]
)
def test_adding_small_integer(self):
blueprint = Blueprint("users")
blueprint.integer("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" INTEGER NOT NULL', statements[0]
)
def test_adding_float(self):
blueprint = Blueprint("users")
blueprint.float("foo", 5, 2)
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" FLOAT NOT NULL', statements[0]
)
def test_adding_double(self):
blueprint = Blueprint("users")
blueprint.double("foo", 15, 8)
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" FLOAT NOT NULL', statements[0]
)
def test_adding_decimal(self):
blueprint = Blueprint("users")
blueprint.decimal("foo", 5, 2)
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" NUMERIC NOT NULL', statements[0]
)
def test_adding_boolean(self):
blueprint = Blueprint("users")
blueprint.boolean("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" TINYINT NOT NULL', statements[0]
)
def test_adding_enum(self):
blueprint = Blueprint("users")
blueprint.enum("foo", ["bar", "baz"])
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" VARCHAR NOT NULL', statements[0]
)
def test_adding_date(self):
blueprint = Blueprint("users")
blueprint.date("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" DATE NOT NULL', statements[0]
)
def test_adding_datetime(self):
blueprint = Blueprint("users")
blueprint.datetime("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" DATETIME NOT NULL', statements[0]
)
def test_adding_time(self):
blueprint = Blueprint("users")
blueprint.time("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" TIME NOT NULL', statements[0]
)
def test_adding_timestamp(self):
blueprint = Blueprint("users")
blueprint.timestamp("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" DATETIME NOT NULL', statements[0]
)
def test_adding_timestamp_with_current(self):
blueprint = Blueprint("users")
blueprint.timestamp("foo").use_current()
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL',
statements[0],
)
def test_adding_timestamps(self):
blueprint = Blueprint("users")
blueprint.timestamps()
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(2, len(statements))
expected = [
'ALTER TABLE "users" ADD COLUMN "created_at" DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL',
'ALTER TABLE "users" ADD COLUMN "updated_at" DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL',
]
self.assertEqual(expected, statements)
def test_adding_timestamps_not_current(self):
blueprint = Blueprint("users")
blueprint.timestamps(use_current=False)
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(2, len(statements))
expected = [
'ALTER TABLE "users" ADD COLUMN "created_at" DATETIME NOT NULL',
'ALTER TABLE "users" ADD COLUMN "updated_at" DATETIME NOT NULL',
]
self.assertEqual(expected, statements)
def test_adding_binary(self):
blueprint = Blueprint("users")
blueprint.binary("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" BLOB NOT NULL', statements[0]
)
def test_adding_json(self):
blueprint = Blueprint("users")
blueprint.json("foo")
statements = blueprint.to_sql(self.get_connection(), self.get_grammar())
self.assertEqual(1, len(statements))
self.assertEqual(
'ALTER TABLE "users" ADD COLUMN "foo" TEXT NOT NULL', statements[0]
)
def get_connection(self):
return flexmock(Connection(None))
def get_grammar(self):
return SQLiteSchemaGrammar(self.get_connection())
|
|
"""Component for controlling Pandora stations through the pianobar client."""
from datetime import timedelta
import logging
import os
import re
import shutil
import signal
import pexpect
from homeassistant import util
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_UP,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
_LOGGER = logging.getLogger(__name__)
# SUPPORT_VOLUME_SET is close to available but we need volume up/down
# controls in the GUI.
PANDORA_SUPPORT = (
SUPPORT_PAUSE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_NEXT_TRACK
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
)
CMD_MAP = {
SERVICE_MEDIA_NEXT_TRACK: "n",
SERVICE_MEDIA_PLAY_PAUSE: "p",
SERVICE_MEDIA_PLAY: "p",
SERVICE_VOLUME_UP: ")",
SERVICE_VOLUME_DOWN: "(",
}
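# These single-character commands correspond to pianobar's default keybindings
# (n = next song, p = play/pause toggle, ")" / "(" = volume up/down); a pianobar
# config that remaps keys would need this table adjusted to match.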
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=2)
CURRENT_SONG_PATTERN = re.compile(r'"(.*?)"\s+by\s+"(.*?)"\son\s+"(.*?)"', re.MULTILINE)
STATION_PATTERN = re.compile(r'Station\s"(.+?)"', re.MULTILINE)
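# For reference, pianobar's "i" (song info) output looks roughly like
#   |>  "Song Title" by "Artist" on "Album"
#   |>  Station "QuickMix" ...
# (decoration varies slightly by pianobar version); CURRENT_SONG_PATTERN pulls
# out the title/artist/album triple and STATION_PATTERN the station name.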
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pandora media player platform."""
if not _pianobar_exists():
return False
pandora = PandoraMediaPlayer("Pandora")
# Make sure we end the pandora subprocess on exit in case user doesn't
# power it down.
def _stop_pianobar(_event):
pandora.turn_off()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_pianobar)
add_entities([pandora])
class PandoraMediaPlayer(MediaPlayerDevice):
"""A media player that uses the Pianobar interface to Pandora."""
def __init__(self, name):
"""Initialize the Pandora device."""
MediaPlayerDevice.__init__(self)
self._name = name
self._player_state = STATE_OFF
self._station = ""
self._media_title = ""
self._media_artist = ""
self._media_album = ""
self._stations = []
self._time_remaining = 0
self._media_duration = 0
self._pianobar = None
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the media player."""
return self._name
@property
def state(self):
"""Return the state of the player."""
return self._player_state
def turn_on(self):
"""Turn the media player on."""
if self._player_state != STATE_OFF:
return
self._pianobar = pexpect.spawn("pianobar")
_LOGGER.info("Started pianobar subprocess")
mode = self._pianobar.expect(
["Receiving new playlist", "Select station:", "Email:"]
)
if mode == 1:
# station list was presented. dismiss it.
self._pianobar.sendcontrol("m")
elif mode == 2:
_LOGGER.warning(
"The pianobar client is not configured to log in. "
"Please create a configuration file for it as described at "
"https://home-assistant.io/integrations/pandora/"
)
# pass through the email/password prompts to quit cleanly
self._pianobar.sendcontrol("m")
self._pianobar.sendcontrol("m")
self._pianobar.terminate()
self._pianobar = None
return
self._update_stations()
self.update_playing_status()
self._player_state = STATE_IDLE
self.schedule_update_ha_state()
def turn_off(self):
"""Turn the media player off."""
if self._pianobar is None:
_LOGGER.info("Pianobar subprocess already stopped")
return
self._pianobar.send("q")
try:
_LOGGER.debug("Stopped Pianobar subprocess")
self._pianobar.terminate()
except pexpect.exceptions.TIMEOUT:
# kill the process group
os.killpg(os.getpgid(self._pianobar.pid), signal.SIGTERM)
_LOGGER.debug("Killed Pianobar subprocess")
self._pianobar = None
self._player_state = STATE_OFF
self.schedule_update_ha_state()
def media_play(self):
"""Send play command."""
self._send_pianobar_command(SERVICE_MEDIA_PLAY_PAUSE)
self._player_state = STATE_PLAYING
self.schedule_update_ha_state()
def media_pause(self):
"""Send pause command."""
self._send_pianobar_command(SERVICE_MEDIA_PLAY_PAUSE)
self._player_state = STATE_PAUSED
self.schedule_update_ha_state()
def media_next_track(self):
"""Go to next track."""
self._send_pianobar_command(SERVICE_MEDIA_NEXT_TRACK)
self.schedule_update_ha_state()
@property
def supported_features(self):
"""Flag media player features that are supported."""
return PANDORA_SUPPORT
@property
def source(self):
"""Name of the current input source."""
return self._station
@property
def source_list(self):
"""List of available input sources."""
return self._stations
@property
def media_title(self):
"""Title of current playing media."""
self.update_playing_status()
return self._media_title
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._media_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._media_album
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration
def select_source(self, source):
"""Choose a different Pandora station and play it."""
try:
station_index = self._stations.index(source)
except ValueError:
_LOGGER.warning("Station %s is not in list", source)
return
_LOGGER.debug("Setting station %s, %d", source, station_index)
self._send_station_list_command()
self._pianobar.sendline(f"{station_index}")
self._pianobar.expect("\r\n")
self._player_state = STATE_PLAYING
def _send_station_list_command(self):
"""Send a station list command."""
self._pianobar.send("s")
try:
self._pianobar.expect("Select station:", timeout=1)
except pexpect.exceptions.TIMEOUT:
# try again. Buffer was contaminated.
self._clear_buffer()
self._pianobar.send("s")
self._pianobar.expect("Select station:")
def update_playing_status(self):
"""Query pianobar for info about current media_title, station."""
response = self._query_for_playing_status()
if not response:
return
self._update_current_station(response)
self._update_current_song(response)
self._update_song_position()
def _query_for_playing_status(self):
"""Query system for info about current track."""
self._clear_buffer()
self._pianobar.send("i")
try:
match_idx = self._pianobar.expect(
[
br"(\d\d):(\d\d)/(\d\d):(\d\d)",
"No song playing",
"Select station",
"Receiving new playlist",
]
)
except pexpect.exceptions.EOF:
_LOGGER.info("Pianobar process already exited")
return None
self._log_match()
if match_idx == 1:
# idle.
response = None
elif match_idx == 2:
# stuck on a station selection dialog. Clear it.
_LOGGER.warning("On unexpected station list page")
self._pianobar.sendcontrol("m") # press enter
self._pianobar.sendcontrol("m") # do it again b/c an 'i' got in
# pylint: disable=assignment-from-none
response = self.update_playing_status()
elif match_idx == 3:
_LOGGER.debug("Received new playlist list")
# pylint: disable=assignment-from-none
response = self.update_playing_status()
else:
response = self._pianobar.before.decode("utf-8")
return response
def _update_current_station(self, response):
"""Update current station."""
station_match = re.search(STATION_PATTERN, response)
if station_match:
self._station = station_match.group(1)
_LOGGER.debug("Got station as: %s", self._station)
else:
_LOGGER.warning("No station match")
def _update_current_song(self, response):
"""Update info about current song."""
song_match = re.search(CURRENT_SONG_PATTERN, response)
if song_match:
(
self._media_title,
self._media_artist,
self._media_album,
) = song_match.groups()
_LOGGER.debug("Got song as: %s", self._media_title)
else:
_LOGGER.warning("No song match")
@util.Throttle(MIN_TIME_BETWEEN_UPDATES)
def _update_song_position(self):
"""
Get the song position and duration.
It's hard to predict whether or not the music will start during init
so we have to detect state by checking the ticker.
"""
(
cur_minutes,
cur_seconds,
total_minutes,
total_seconds,
) = self._pianobar.match.groups()
time_remaining = int(cur_minutes) * 60 + int(cur_seconds)
self._media_duration = int(total_minutes) * 60 + int(total_seconds)
if time_remaining not in (self._time_remaining, self._media_duration):
self._player_state = STATE_PLAYING
elif self._player_state == STATE_PLAYING:
self._player_state = STATE_PAUSED
self._time_remaining = time_remaining
def _log_match(self):
"""Log grabbed values from console."""
_LOGGER.debug(
"Before: %s\nMatch: %s\nAfter: %s",
repr(self._pianobar.before),
repr(self._pianobar.match),
repr(self._pianobar.after),
)
def _send_pianobar_command(self, service_cmd):
"""Send a command to Pianobar."""
command = CMD_MAP.get(service_cmd)
        _LOGGER.debug("Sending pianobar command %s for %s", command, service_cmd)
        if command is None:
            _LOGGER.info("Command %s not supported yet", service_cmd)
            return
        self._clear_buffer()
        self._pianobar.sendline(command)
def _update_stations(self):
"""List defined Pandora stations."""
self._send_station_list_command()
station_lines = self._pianobar.before.decode("utf-8")
_LOGGER.debug("Getting stations: %s", station_lines)
self._stations = []
for line in station_lines.split("\r\n"):
match = re.search(r"\d+\).....(.+)", line)
if match:
station = match.group(1).strip()
_LOGGER.debug("Found station %s", station)
self._stations.append(station)
else:
_LOGGER.debug("No station match on %s", line)
self._pianobar.sendcontrol("m") # press enter with blank line
self._pianobar.sendcontrol("m") # do it twice in case an 'i' got in
def _clear_buffer(self):
"""
Clear buffer from pexpect.
This is necessary because there are a bunch of 00:00 in the buffer
"""
try:
while not self._pianobar.expect(".+", timeout=0.1):
pass
except pexpect.exceptions.TIMEOUT:
pass
except pexpect.exceptions.EOF:
pass
def _pianobar_exists():
"""Verify that Pianobar is properly installed."""
pianobar_exe = shutil.which("pianobar")
if pianobar_exe:
return True
_LOGGER.warning(
"The Pandora integration depends on the Pianobar client, which "
"cannot be found. Please install using instructions at "
"https://home-assistant.io/components/media_player.pandora/"
)
return False
|
|
# Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
try:
import mock # Python 2
except ImportError:
from unittest import mock # Python 3
from nose.tools import raises
from ryu.services.protocols.bgp import bgpspeaker
LOG = logging.getLogger(__name__)
class Test_BGPSpeaker(unittest.TestCase):
"""
Test case for bgp.bgpspeaker.BGPSpeaker
"""
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
mock.MagicMock(return_value=None))
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
def test_evpn_prefix_add_mac_ip_adv(self, mock_call):
# Prepare test data
route_type = bgpspeaker.EVPN_MAC_IP_ADV_ROUTE
route_dist = '65000:100'
esi = 0 # denotes single-homed
ethernet_tag_id = 200
mac_addr = 'aa:bb:cc:dd:ee:ff'
ip_addr = '192.168.0.1'
next_hop = '10.0.0.1'
expected_kwargs = {
'route_type': route_type,
'route_dist': route_dist,
'esi': esi,
'ethernet_tag_id': ethernet_tag_id,
'mac_addr': mac_addr,
'ip_addr': ip_addr,
'next_hop': next_hop,
}
# Test
speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
speaker.evpn_prefix_add(
route_type=route_type,
route_dist=route_dist,
esi=esi,
ethernet_tag_id=ethernet_tag_id,
mac_addr=mac_addr,
ip_addr=ip_addr,
next_hop=next_hop,
)
# Check
mock_call.assert_called_with(
'evpn_prefix.add_local', **expected_kwargs)
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
mock.MagicMock(return_value=None))
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
def test_evpn_prefix_add_multicast_etag(self, mock_call):
# Prepare test data
route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
route_dist = '65000:100'
esi = 0 # denotes single-homed
ethernet_tag_id = 200
mac_addr = 'aa:bb:cc:dd:ee:ff'
ip_addr = '192.168.0.1'
next_hop = '10.0.0.1'
expected_kwargs = {
'route_type': route_type,
'route_dist': route_dist,
# 'esi': esi, # should be ignored
'ethernet_tag_id': ethernet_tag_id,
# 'mac_addr': mac_addr, # should be ignored
'ip_addr': ip_addr,
'next_hop': next_hop,
}
# Test
speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
speaker.evpn_prefix_add(
route_type=route_type,
route_dist=route_dist,
esi=esi,
ethernet_tag_id=ethernet_tag_id,
mac_addr=mac_addr,
ip_addr=ip_addr,
next_hop=next_hop,
)
# Check
mock_call.assert_called_with(
'evpn_prefix.add_local', **expected_kwargs)
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
mock.MagicMock(return_value=None))
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
def test_evpn_prefix_add_multicast_etag_no_next_hop(self, mock_call):
# Prepare test data
route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
route_dist = '65000:100'
esi = 0 # denotes single-homed
ethernet_tag_id = 200
mac_addr = 'aa:bb:cc:dd:ee:ff'
ip_addr = '192.168.0.1'
next_hop = '0.0.0.0' # the default value
expected_kwargs = {
'route_type': route_type,
'route_dist': route_dist,
# 'esi': esi, # should be ignored
'ethernet_tag_id': ethernet_tag_id,
# 'mac_addr': mac_addr, # should be ignored
'ip_addr': ip_addr,
'next_hop': next_hop,
}
# Test
speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
speaker.evpn_prefix_add(
route_type=route_type,
route_dist=route_dist,
esi=esi,
ethernet_tag_id=ethernet_tag_id,
mac_addr=mac_addr,
ip_addr=ip_addr,
# next_hop=next_hop, # omitted
)
# Check
mock_call.assert_called_with(
'evpn_prefix.add_local', **expected_kwargs)
@raises(ValueError)
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
mock.MagicMock(return_value=None))
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
def test_evpn_prefix_add_invalid_route_type(self, mock_call):
# Prepare test data
route_type = 'foobar' # Invalid EVPN route type
route_dist = '65000:100'
esi = 0 # denotes single-homed
ethernet_tag_id = 200
mac_addr = 'aa:bb:cc:dd:ee:ff'
ip_addr = '192.168.0.1'
next_hop = '10.0.0.1'
# Test
speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
speaker.evpn_prefix_add(
route_type=route_type,
route_dist=route_dist,
esi=esi,
ethernet_tag_id=ethernet_tag_id,
mac_addr=mac_addr,
ip_addr=ip_addr,
next_hop=next_hop,
)
# Check
mock_call.assert_called_with(
'evpn_prefix.add_local', 'Invalid arguments detected')
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
mock.MagicMock(return_value=None))
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
def test_evpn_prefix_del_mac_ip_adv(self, mock_call):
# Prepare test data
route_type = bgpspeaker.EVPN_MAC_IP_ADV_ROUTE
route_dist = '65000:100'
esi = 0 # denotes single-homed
ethernet_tag_id = 200
mac_addr = 'aa:bb:cc:dd:ee:ff'
ip_addr = '192.168.0.1'
expected_kwargs = {
'route_type': route_type,
'route_dist': route_dist,
'esi': esi,
'ethernet_tag_id': ethernet_tag_id,
'mac_addr': mac_addr,
'ip_addr': ip_addr,
}
# Test
speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
speaker.evpn_prefix_del(
route_type=route_type,
route_dist=route_dist,
esi=esi,
ethernet_tag_id=ethernet_tag_id,
mac_addr=mac_addr,
ip_addr=ip_addr,
)
# Check
mock_call.assert_called_with(
'evpn_prefix.delete_local', **expected_kwargs)
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
mock.MagicMock(return_value=None))
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
def test_evpn_prefix_del_multicast_etag(self, mock_call):
# Prepare test data
route_type = bgpspeaker.EVPN_MULTICAST_ETAG_ROUTE
route_dist = '65000:100'
esi = 0 # denotes single-homed
ethernet_tag_id = 200
mac_addr = 'aa:bb:cc:dd:ee:ff'
ip_addr = '192.168.0.1'
expected_kwargs = {
'route_type': route_type,
'route_dist': route_dist,
# 'esi': esi, # should be ignored
'ethernet_tag_id': ethernet_tag_id,
# 'mac_addr': mac_addr, # should be ignored
'ip_addr': ip_addr,
}
# Test
speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
speaker.evpn_prefix_del(
route_type=route_type,
route_dist=route_dist,
esi=esi,
ethernet_tag_id=ethernet_tag_id,
mac_addr=mac_addr,
ip_addr=ip_addr,
)
# Check
mock_call.assert_called_with(
'evpn_prefix.delete_local', **expected_kwargs)
@raises(ValueError)
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.BGPSpeaker.__init__',
mock.MagicMock(return_value=None))
@mock.patch('ryu.services.protocols.bgp.bgpspeaker.call')
def test_evpn_prefix_del_invalid_route_type(self, mock_call):
# Prepare test data
route_type = 'foobar' # Invalid EVPN route type
route_dist = '65000:100'
esi = 0 # denotes single-homed
ethernet_tag_id = 200
mac_addr = 'aa:bb:cc:dd:ee:ff'
ip_addr = '192.168.0.1'
# Test
speaker = bgpspeaker.BGPSpeaker(65000, '10.0.0.1')
speaker.evpn_prefix_del(
route_type=route_type,
route_dist=route_dist,
esi=esi,
ethernet_tag_id=ethernet_tag_id,
mac_addr=mac_addr,
ip_addr=ip_addr,
)
# Check
mock_call.assert_called_with(
'evpn_prefix.delete_local', 'Invalid arguments detected')
|
|
#!/usr/bin/env python
import copy
import unittest
from nose.tools import *
import networkx
from test_graph import TestGraph
class TestMultiGraph(TestGraph):
def setUp(self):
self.Graph=networkx.MultiGraph
# build K3
ed1,ed2,ed3 = ({0:{}},{0:{}},{0:{}})
self.k3adj={0: {1: ed1, 2: ed2},
1: {0: ed1, 2: ed3},
2: {0: ed2, 1: ed3}}
self.k3edges=[(0, 1), (0, 2), (1, 2)]
self.k3nodes=[0, 1, 2]
self.K3=self.Graph()
self.K3.adj = self.K3.edge = self.k3adj
self.K3.node={}
self.K3.node[0]={}
self.K3.node[1]={}
self.K3.node[2]={}
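    # Note: a MultiGraph adjacency structure is a three-level dict,
    # node -> neighbor -> edge key -> edge-data dict, so every K3 edge above
    # is stored under key 0 with an empty data dict on both endpoints.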
def test_data_input(self):
pass
# G=self.Graph(data={1:[2],2:[1]}, name="test")
# assert_equal(G.name,"test")
# assert_equal(sorted(G.adj.items()),[(1, {2: [1]}), (2, {1: [1]})])
def test_contains(self):
G=self.K3
assert(1 in G )
assert(4 not in G )
assert('b' not in G )
assert([] not in G ) # no exception for nonhashable
assert({1:1} not in G) # no exception for nonhashable
def test_order(self):
G=self.K3
assert_equal(len(G),3)
assert_equal(G.order(),3)
assert_equal(G.number_of_nodes(),3)
def test_getitem(self):
G=self.K3
assert_equal(G[0],{1: {0:{}}, 2: {0:{}}})
assert_raises(KeyError, G.__getitem__, 'j')
assert_raises((TypeError,networkx.NetworkXError), G.__getitem__, ['A'])
def test_remove_node(self):
G=self.K3
G.remove_node(0)
assert_equal(G.adj,{1:{2:{0:{}}},2:{1:{0:{}}}})
assert_raises((KeyError,networkx.NetworkXError), G.remove_node,-1)
def test_add_edge(self):
G=self.Graph()
G.add_edge(0,1)
assert_equal(G.adj,{0: {1: {0:{}}}, 1: {0: {0:{}}}})
G=self.Graph()
G.add_edge(*(0,1))
assert_equal(G.adj,{0: {1: {0:{}}}, 1: {0: {0:{}}}})
def test_add_edges_from(self):
G=self.Graph()
G.add_edges_from([(0,1),(0,1,{'weight':3})])
assert_equal(G.adj,{0: {1: {0:{},1:{'weight':3}}},
1: {0: {0:{},1:{'weight':3}}}})
G.add_edges_from([(0,1),(0,1,{'weight':3})],weight=2)
assert_equal(G.adj,{0: {1: {0:{},1:{'weight':3},
2:{'weight':2},3:{'weight':3}}},
1: {0: {0:{},1:{'weight':3},
2:{'weight':2},3:{'weight':3}}}})
# too few in tuple
assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,)])
# too many in tuple
assert_raises(networkx.NetworkXError, G.add_edges_from,[(0,1,2,3,4)])
assert_raises(TypeError, G.add_edges_from,[0]) # not a tuple
def test_remove_edge(self):
G=self.K3
G.remove_edge(0,1)
assert_equal(G.adj,{0: {2: {0: {}}},
1: {2: {0: {}}},
2: {0: {0: {}},
1: {0: {}}}})
assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
def test_remove_edges_from(self):
G=self.K3
G.remove_edges_from([(0,1)])
assert_equal(G.adj,{0:{2:{0:{}}},1:{2:{0:{}}},2:{0:{0:{}},1:{0:{}}}})
G.remove_edges_from([(0,0)]) # silent fail
def test_remove_multiedge(self):
G=self.K3
G.add_edge(0,1,key='parallel edge')
G.remove_edge(0,1,key='parallel edge')
assert_equal(G.adj,{0: {1: {0:{}}, 2: {0:{}}},
1: {0: {0:{}}, 2: {0:{}}},
2: {0: {0:{}}, 1: {0:{}}}})
G.remove_edge(0,1)
assert_equal(G.adj,{0:{2:{0:{}}},1:{2:{0:{}}},2:{0:{0:{}},1:{0:{}}}})
assert_raises((KeyError,networkx.NetworkXError), G.remove_edge,-1,0)
def test_get_edge_data(self):
G=self.K3
assert_equal(G.get_edge_data(0,1),{0:{}})
assert_equal(G[0][1],{0:{}})
assert_equal(G[0][1][0],{})
assert_equal(G.get_edge_data(10,20),None)
# assert_raises((KeyError,networkx.NetworkXError), G.get_edge,-1,0)
def test_adjacency_iter(self):
G=self.K3
assert_equal(dict(G.adjacency_iter()),
{0: {1: {0:{}}, 2: {0:{}}},
1: {0: {0:{}}, 2: {0:{}}},
2: {0: {0:{}}, 1: {0:{}}}})
def is_deepcopy(self,H,G):
self.graphs_equal_but_different(H,G)
# graph
assert_equal(G.graph['foo'],H.graph['foo'])
G.graph['foo'].append(1)
assert_not_equal(G.graph['foo'],H.graph['foo'])
# node
assert_equal(G.node[0]['foo'],H.node[0]['foo'])
G.node[0]['foo'].append(1)
assert_not_equal(G.node[0]['foo'],H.node[0]['foo'])
# edge
assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
G[1][2][0]['foo'].append(1)
assert_not_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
def is_shallow_copy(self,H,G):
self.graphs_equal_but_different(H,G)
# graph
assert_equal(G.graph['foo'],H.graph['foo'])
G.graph['foo'].append(1)
assert_equal(G.graph['foo'],H.graph['foo'])
# node
assert_equal(G.node[0]['foo'],H.node[0]['foo'])
G.node[0]['foo'].append(1)
assert_equal(G.node[0]['foo'],H.node[0]['foo'])
# edge
assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
G[1][2][0]['foo'].append(1)
assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
def change_attr(self, H, G):
# used by graph_equal_but_different
old_foo=H[1][2][0]['foo']
H.add_edge(1,2,0,foo='baz')
assert_not_equal(G.edge,H.edge)
H.add_edge(1,2,0,foo=old_foo)
assert_equal(G.edge,H.edge)
HH=H.copy()
H.add_edge(1,2,foo='baz')
assert_not_equal(G.edge,H.edge)
H=HH
old_foo=H.node[0]['foo']
H.node[0]['foo']='baz'
assert_not_equal(G.node,H.node)
H.node[0]['foo']=old_foo
assert_equal(G.node,H.node)
def test_to_undirected(self):
G=self.K3
self.add_attributes(G)
H=networkx.MultiGraph(G)
self.is_shallow_copy(H,G)
H=G.to_undirected()
self.is_deepcopy(H,G)
def test_to_directed(self):
G=self.K3
self.add_attributes(G)
H=networkx.MultiDiGraph(G)
self.is_shallow_copy(H,G)
H=G.to_directed()
self.is_deepcopy(H,G)
def test_selfloops(self):
G=self.K3
G.add_edge(0,0)
assert_equal(G.nodes_with_selfloops(),[0])
assert_equal(G.selfloop_edges(),[(0,0)])
assert_equal(G.selfloop_edges(data=True),[(0,0,{})])
assert_equal(G.number_of_selfloops(),1)
def test_selfloops2(self):
G=self.K3
G.add_edge(0,0)
G.add_edge(0,0)
G.add_edge(0,0,key='parallel edge')
G.remove_edge(0,0,key='parallel edge')
assert_equal(G.number_of_edges(0,0),2)
G.remove_edge(0,0)
assert_equal(G.number_of_edges(0,0),1)
# G.add_edge(1,1)
# G.remove_node(1)
# G.add_edge(0,0)
# G.add_edge(1,1)
# G.remove_nodes_from([0,1])
def test_edge_attr4(self):
G=self.Graph()
G.add_edge(1,2,key=0,data=7,spam='bar',bar='foo')
assert_equal(G.edges(data=True),
[(1,2,{'data':7,'spam':'bar','bar':'foo'})])
G[1][2][0]['data']=10 # OK to set data like this
assert_equal(G.edges(data=True),
[(1,2,{'data':10,'spam':'bar','bar':'foo'})])
G.edge[1][2][0]['data']=20 # another spelling, "edge"
assert_equal(G.edges(data=True),
[(1,2,{'data':20,'spam':'bar','bar':'foo'})])
G.edge[1][2][0]['listdata']=[20,200]
G.edge[1][2][0]['weight']=20
assert_equal(G.edges(data=True),
[(1,2,{'data':20,'spam':'bar',
'bar':'foo','listdata':[20,200],'weight':20})])
|
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <[email protected]>
import libxml2
import re, sys, string
import typeexpr
def parse_GL_API( file_name, factory = None ):
doc = libxml2.readFile( file_name, None, libxml2.XML_PARSE_XINCLUDE + libxml2.XML_PARSE_NOBLANKS + libxml2.XML_PARSE_DTDVALID + libxml2.XML_PARSE_DTDATTR + libxml2.XML_PARSE_DTDLOAD + libxml2.XML_PARSE_NOENT )
ret = doc.xincludeProcess()
if not factory:
factory = gl_item_factory()
api = factory.create_item( "api", None, None )
api.process_element( doc )
# After the XML has been processed, we need to go back and assign
# dispatch offsets to the functions that request that their offsets
# be assigned by the scripts. Typically this means all functions
# that are not part of the ABI.
for func in api.functionIterateByCategory():
if func.assign_offset:
            func.offset = api.next_offset
api.next_offset += 1
doc.freeDoc()
return api
def is_attr_true( element, name ):
"""Read a name value from an element's attributes.
The value read from the attribute list must be either 'true' or
'false'. If the value is 'false', zero will be returned. If the
value is 'true', non-zero will be returned. An exception will be
raised for any other value."""
value = element.nsProp( name, None )
if value == "true":
return 1
elif value == "false":
return 0
else:
raise RuntimeError('Invalid value "%s" for boolean "%s".' % (value, name))
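# For illustration: is_attr_true(elem, "float") returns 1 for float="true" and
# 0 for float="false"; any other value (including a missing attribute, which
# nsProp() reports as None unless the DTD supplies a default) raises RuntimeError.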
class gl_print_base:
"""Base class of all API pretty-printers.
In the model-view-controller pattern, this is the view. Any derived
    class will want to over-ride the printBody, printRealHeader, and
printRealFooter methods. Some derived classes may want to over-ride
printHeader and printFooter, or even Print (though this is unlikely).
"""
def __init__(self):
# Name of the script that is generating the output file.
# Every derived class should set this to the name of its
# source file.
self.name = "a"
# License on the *generated* source file. This may differ
# from the license on the script that is generating the file.
# Every derived class should set this to some reasonable
# value.
#
# See license.py for an example of a reasonable value.
self.license = "The license for this file is unspecified."
# The header_tag is the name of the C preprocessor define
# used to prevent multiple inclusion. Typically only
# generated C header files need this to be set. Setting it
# causes code to be generated automatically in printHeader
# and printFooter.
self.header_tag = None
# List of file-private defines that must be undefined at the
# end of the file. This can be used in header files to define
# names for use in the file, then undefine them at the end of
# the header file.
self.undef_list = []
return
def Print(self, api):
self.printHeader()
self.printBody(api)
self.printFooter()
return
def printHeader(self):
"""Print the header associated with all files and call the printRealHeader method."""
print '/* DO NOT EDIT - This file generated automatically by %s script */' \
% (self.name)
print ''
print '/*'
print ' * ' + self.license.replace('\n', '\n * ')
print ' */'
print ''
if self.header_tag:
print '#if !defined( %s )' % (self.header_tag)
print '# define %s' % (self.header_tag)
print ''
        self.printRealHeader()
return
def printFooter(self):
"""Print the header associated with all files and call the printRealFooter method."""
self.printRealFooter()
if self.undef_list:
print ''
for u in self.undef_list:
print "# undef %s" % (u)
if self.header_tag:
print ''
print '#endif /* !defined( %s ) */' % (self.header_tag)
def printRealHeader(self):
"""Print the "real" header for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printRealFooter(self):
"""Print the "real" footer for the created file.
In the base class, this function is empty. All derived
classes should over-ride this function."""
return
def printPure(self):
"""Conditionally define `PURE' function attribute.
Conditionally defines a preprocessor macro `PURE' that wraps
GCC's `pure' function attribute. The conditional code can be
        easily adapted to other compilers that support a similar
feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("PURE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define PURE __attribute__((pure))
# else
# define PURE
# endif"""
return
def printFastcall(self):
"""Conditionally define `FASTCALL' function attribute.
Conditionally defines a preprocessor macro `FASTCALL' that
wraps GCC's `fastcall' function attribute. The conditional
        code can be easily adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("FASTCALL")
print """# if defined(__i386__) && defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)
# define FASTCALL __attribute__((fastcall))
# else
# define FASTCALL
# endif"""
return
def printVisibility(self, S, s):
"""Conditionally define visibility function attribute.
Conditionally defines a preprocessor macro name S that wraps
GCC's visibility function attribute. The visibility used is
        the parameter s. The conditional code can be easily adapted
to other compilers that support a similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append(S)
print """# if (defined(__GNUC__) && !defined(__CYGWIN__) && !defined(__MINGW32__)) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) && defined(__ELF__))
# define %s __attribute__((visibility("%s")))
# else
# define %s
# endif""" % (S, s, S)
return
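    # For example, printVisibility("HIDDEN", "hidden") emits a HIDDEN macro
    # that expands to __attribute__((visibility("hidden"))) on GCC-compatible
    # compilers and to nothing elsewhere.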
def printNoinline(self):
"""Conditionally define `NOINLINE' function attribute.
Conditionally defines a preprocessor macro `NOINLINE' that
wraps GCC's `noinline' function attribute. The conditional
        code can be easily adapted to other compilers that support a
similar feature.
The name is also added to the file's undef_list.
"""
self.undef_list.append("NOINLINE")
print """# if defined(__GNUC__) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
# define NOINLINE __attribute__((noinline))
# else
# define NOINLINE
# endif"""
return
def real_function_name(element):
name = element.nsProp( "name", None )
alias = element.nsProp( "alias", None )
if alias:
return alias
else:
return name
def real_category_name(c):
if re.compile("[1-9][0-9]*[.][0-9]+").match(c):
return "GL_VERSION_" + c.replace(".", "_")
else:
return c
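# e.g. real_category_name("1.5") -> "GL_VERSION_1_5", while extension names
# such as "GL_ARB_multitexture" pass through unchanged.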
def classify_category(name, number):
"""Based on the category name and number, select a numerical class for it.
Categories are divided into four classes numbered 0 through 3. The
classes are:
0. Core GL versions, sorted by version number.
1. ARB extensions, sorted by extension number.
2. Non-ARB extensions, sorted by extension number.
3. Un-numbered extensions, sorted by extension name.
"""
try:
core_version = float(name)
except Exception,e:
core_version = 0.0
if core_version > 0.0:
cat_type = 0
key = name
elif name.startswith("GL_ARB_") or name.startswith("GLX_ARB_") or name.startswith("WGL_ARB_"):
cat_type = 1
key = int(number)
else:
if number != None:
cat_type = 2
key = int(number)
else:
cat_type = 3
key = name
return [cat_type, key]
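# Illustrative examples (hypothetical inputs):
#   classify_category("1.5", None)                -> [0, "1.5"]
#   classify_category("GL_ARB_multitexture", "1") -> [1, 1]
#   classify_category("GL_EXT_texture", "20")     -> [2, 20]
#   classify_category("GL_XYZ_custom_ext", None)  -> [3, "GL_XYZ_custom_ext"]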
def create_parameter_string(parameters, include_names):
"""Create a parameter string from a list of gl_parameters."""
list = []
for p in parameters:
if p.is_padding:
continue
if include_names:
list.append( p.string() )
else:
list.append( p.type_string() )
if len(list) == 0: list = ["void"]
return string.join(list, ", ")
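# e.g. with include_names set this yields something like "GLenum pname, GLint param";
# without names, "GLenum, GLint". An empty (or all-padding) parameter list
# collapses to "void".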
class gl_item:
def __init__(self, element, context):
self.context = context
self.name = element.nsProp( "name", None )
self.category = real_category_name( element.parent.nsProp( "name", None ) )
return
class gl_type( gl_item ):
def __init__(self, element, context):
gl_item.__init__(self, element, context)
self.size = int( element.nsProp( "size", None ), 0 )
te = typeexpr.type_expression( None )
tn = typeexpr.type_node()
tn.size = int( element.nsProp( "size", None ), 0 )
tn.integer = not is_attr_true( element, "float" )
tn.unsigned = is_attr_true( element, "unsigned" )
tn.name = "GL" + self.name
te.set_base_type_node( tn )
self.type_expr = te
return
def get_type_expression(self):
return self.type_expr
class gl_enum( gl_item ):
def __init__(self, element, context):
gl_item.__init__(self, element, context)
self.value = int( element.nsProp( "value", None ), 0 )
temp = element.nsProp( "count", None )
if not temp or temp == "?":
self.default_count = -1
else:
try:
c = int(temp)
except Exception,e:
                raise RuntimeError('Invalid count value "%s" for enum "%s" when an integer was expected.' % (temp, self.name))
self.default_count = c
return
def priority(self):
"""Calculate a 'priority' for this enum name.
When an enum is looked up by number, there may be many
        possible names, but only one is the 'preferred' name. The
priority is used to select which name is the 'best'.
Highest precedence is given to core GL name. ARB extension
names have the next highest, followed by EXT extension names.
Vendor extension names are the lowest.
"""
if self.name.endswith( "_BIT" ):
bias = 1
else:
bias = 0
if self.category.startswith( "GL_VERSION_" ):
priority = 0
elif self.category.startswith( "GL_ARB_" ):
priority = 2
elif self.category.startswith( "GL_EXT_" ):
priority = 4
else:
priority = 6
return priority + bias
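    # e.g. a core "GL_VERSION_*" name scores 0 (1 for a *_BIT alias), an ARB
    # name 2/3, an EXT name 4/5 and a vendor name 6/7; the lowest score wins
    # when picking the canonical name for an enum value.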
class gl_parameter:
def __init__(self, element, context):
self.name = element.nsProp( "name", None )
ts = element.nsProp( "type", None )
self.type_expr = typeexpr.type_expression( ts, context )
temp = element.nsProp( "variable_param", None )
if temp:
self.count_parameter_list = temp.split( ' ' )
else:
self.count_parameter_list = []
# The count tag can be either a numeric string or the name of
# a variable. If it is the name of a variable, the int(c)
# statement will throw an exception, and the except block will
# take over.
c = element.nsProp( "count", None )
try:
count = int(c)
self.count = count
self.counter = None
except Exception,e:
count = 1
self.count = 0
self.counter = c
self.count_scale = int(element.nsProp( "count_scale", None ))
elements = (count * self.count_scale)
if elements == 1:
elements = 0
#if ts == "GLdouble":
# print '/* stack size -> %s = %u (before)*/' % (self.name, self.type_expr.get_stack_size())
# print '/* # elements = %u */' % (elements)
self.type_expr.set_elements( elements )
#if ts == "GLdouble":
# print '/* stack size -> %s = %u (after) */' % (self.name, self.type_expr.get_stack_size())
self.is_client_only = is_attr_true( element, 'client_only' )
self.is_counter = is_attr_true( element, 'counter' )
self.is_output = is_attr_true( element, 'output' )
# Pixel data has special parameters.
self.width = element.nsProp('img_width', None)
self.height = element.nsProp('img_height', None)
self.depth = element.nsProp('img_depth', None)
self.extent = element.nsProp('img_extent', None)
self.img_xoff = element.nsProp('img_xoff', None)
self.img_yoff = element.nsProp('img_yoff', None)
self.img_zoff = element.nsProp('img_zoff', None)
self.img_woff = element.nsProp('img_woff', None)
self.img_format = element.nsProp('img_format', None)
self.img_type = element.nsProp('img_type', None)
self.img_target = element.nsProp('img_target', None)
self.img_pad_dimensions = is_attr_true( element, 'img_pad_dimensions' )
self.img_null_flag = is_attr_true( element, 'img_null_flag' )
self.img_send_null = is_attr_true( element, 'img_send_null' )
self.is_padding = is_attr_true( element, 'padding' )
return
def compatible(self, other):
return 1
def is_array(self):
return self.is_pointer()
def is_pointer(self):
return self.type_expr.is_pointer()
def is_image(self):
if self.width:
return 1
else:
return 0
def is_variable_length(self):
return len(self.count_parameter_list) or self.counter
def is_64_bit(self):
count = self.type_expr.get_element_count()
if count:
if (self.size() / count) == 8:
return 1
else:
if self.size() == 8:
return 1
return 0
def string(self):
return self.type_expr.original_string + " " + self.name
def type_string(self):
return self.type_expr.original_string
def get_base_type_string(self):
return self.type_expr.get_base_name()
def get_dimensions(self):
if not self.width:
return [ 0, "0", "0", "0", "0" ]
dim = 1
w = self.width
h = "1"
d = "1"
e = "1"
if self.height:
dim = 2
h = self.height
if self.depth:
dim = 3
d = self.depth
if self.extent:
dim = 4
e = self.extent
return [ dim, w, h, d, e ]
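    # e.g. a parameter whose XML carries img_width="width" and img_height="height"
    # (hypothetical attribute values) and nothing deeper comes back as
    # [2, "width", "height", "1", "1"].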
def get_stack_size(self):
return self.type_expr.get_stack_size()
def size(self):
if self.is_image():
return 0
else:
return self.type_expr.get_element_size()
def get_element_count(self):
c = self.type_expr.get_element_count()
if c == 0:
return 1
return c
def size_string(self, use_parens = 1):
s = self.size()
if self.counter or self.count_parameter_list:
list = [ "compsize" ]
if self.counter and self.count_parameter_list:
list.append( self.counter )
elif self.counter:
list = [ self.counter ]
if s > 1:
list.append( str(s) )
if len(list) > 1 and use_parens :
return "(%s)" % (string.join(list, " * "))
else:
return string.join(list, " * ")
elif self.is_image():
return "compsize"
else:
return str(s)
def format_string(self):
if self.type_expr.original_string == "GLenum":
return "0x%x"
else:
return self.type_expr.format_string()
class gl_function( gl_item ):
def __init__(self, element, context):
self.context = context
self.name = None
self.entry_points = []
self.return_type = "void"
self.parameters = []
self.offset = -1
self.initialized = 0
self.images = []
self.assign_offset = 0
self.static_entry_points = []
# Track the parameter string (for the function prototype)
# for each entry-point. This is done because some functions
# change their prototype slightly when promoted from extension
# to ARB extension to core. glTexImage3DEXT and glTexImage3D
# are good examples of this. Scripts that need to generate
        # code for these differing aliases need the real prototype
# for each entry-point. Otherwise, they may generate code
# that won't compile.
self.entry_point_parameters = {}
self.process_element( element )
return
def process_element(self, element):
name = element.nsProp( "name", None )
alias = element.nsProp( "alias", None )
if is_attr_true(element, "static_dispatch"):
self.static_entry_points.append(name)
self.entry_points.append( name )
if alias:
true_name = alias
else:
true_name = name
# Only try to set the offset when a non-alias
        # entry-point is being processed.
offset = element.nsProp( "offset", None )
if offset:
try:
o = int( offset )
self.offset = o
except Exception, e:
self.offset = -1
if offset == "assign":
self.assign_offset = 1
if not self.name:
self.name = true_name
elif self.name != true_name:
raise RuntimeError("Function true name redefined. Was %s, now %s." % (self.name, true_name))
# There are two possible cases. The first time an entry-point
# with data is seen, self.initialized will be 0. On that
# pass, we just fill in the data. The next time an
# entry-point with data is seen, self.initialized will be 1.
        # On that pass we have to make sure that the new values match the
        # values from the previous entry-point.
parameters = []
return_type = "void"
child = element.children
while child:
if child.type == "element":
if child.name == "return":
return_type = child.nsProp( "type", None )
elif child.name == "param":
param = self.context.factory.create_item( "parameter", child, self.context)
parameters.append( param )
child = child.next
if self.initialized:
if self.return_type != return_type:
raise RuntimeError( "Return type changed in %s. Was %s, now %s." % (name, self.return_type, return_type))
if len(parameters) != len(self.parameters):
raise RuntimeError( "Parameter count mismatch in %s. Was %d, now %d." % (name, len(self.parameters), len(parameters)))
for j in range(0, len(parameters)):
p1 = parameters[j]
p2 = self.parameters[j]
if not p1.compatible( p2 ):
raise RuntimeError( 'Parameter type mismatch in %s. "%s" was "%s", now "%s".' % (name, p2.name, p2.type_expr.original_string, p1.type_expr.original_string))
if true_name == name or not self.initialized:
self.return_type = return_type
self.parameters = parameters
for param in self.parameters:
if param.is_image():
self.images.append( param )
if element.children:
self.initialized = 1
self.entry_point_parameters[name] = parameters
else:
self.entry_point_parameters[name] = []
return
def filter_entry_points(self, entry_point_list):
"""Filter out entry points not in entry_point_list."""
if not self.initialized:
raise RuntimeError('%s is not initialized yet' % self.name)
entry_points = []
for ent in self.entry_points:
if ent not in entry_point_list:
if ent in self.static_entry_points:
self.static_entry_points.remove(ent)
self.entry_point_parameters.pop(ent)
else:
entry_points.append(ent)
if not entry_points:
raise RuntimeError('%s has no entry point after filtering' % self.name)
self.entry_points = entry_points
if self.name not in entry_points:
# use the first remaining entry point
self.name = entry_points[0]
self.parameters = self.entry_point_parameters[entry_points[0]]
def get_images(self):
"""Return potentially empty list of input images."""
return self.images
def parameterIterator(self):
        return self.parameters.__iter__()
def get_parameter_string(self, entrypoint = None):
if entrypoint:
params = self.entry_point_parameters[ entrypoint ]
else:
params = self.parameters
return create_parameter_string( params, 1 )
def get_called_parameter_string(self):
p_string = ""
comma = ""
for p in self.parameterIterator():
p_string = p_string + comma + p.name
comma = ", "
return p_string
def is_abi(self):
return (self.offset >= 0 and not self.assign_offset)
def is_static_entry_point(self, name):
return name in self.static_entry_points
def dispatch_name(self):
if self.name in self.static_entry_points:
return self.name
else:
return "_dispatch_stub_%u" % (self.offset)
def static_name(self, name):
if name in self.static_entry_points:
return name
else:
return "_dispatch_stub_%u" % (self.offset)
class gl_item_factory:
"""Factory to create objects derived from gl_item."""
def create_item(self, item_name, element, context):
if item_name == "function":
return gl_function(element, context)
if item_name == "type":
return gl_type(element, context)
elif item_name == "enum":
return gl_enum(element, context)
elif item_name == "parameter":
return gl_parameter(element, context)
elif item_name == "api":
return gl_api(self)
else:
return None
class gl_api:
def __init__(self, factory):
self.functions_by_name = {}
self.enums_by_name = {}
self.types_by_name = {}
self.category_dict = {}
self.categories = [{}, {}, {}, {}]
self.factory = factory
self.next_offset = 0
typeexpr.create_initial_types()
return
def filter_functions(self, entry_point_list):
"""Filter out entry points not in entry_point_list."""
functions_by_name = {}
for func in self.functions_by_name.itervalues():
entry_points = [ent for ent in func.entry_points if ent in entry_point_list]
if entry_points:
func.filter_entry_points(entry_points)
functions_by_name[func.name] = func
self.functions_by_name = functions_by_name
def process_element(self, doc):
element = doc.children
while element.type != "element" or element.name != "OpenGLAPI":
element = element.next
if element:
self.process_OpenGLAPI(element)
return
def process_OpenGLAPI(self, element):
child = element.children
while child:
if child.type == "element":
if child.name == "category":
self.process_category( child )
elif child.name == "OpenGLAPI":
self.process_OpenGLAPI( child )
child = child.next
return
def process_category(self, cat):
cat_name = cat.nsProp( "name", None )
cat_number = cat.nsProp( "number", None )
[cat_type, key] = classify_category(cat_name, cat_number)
self.categories[cat_type][key] = [cat_name, cat_number]
child = cat.children
while child:
if child.type == "element":
if child.name == "function":
func_name = real_function_name( child )
temp_name = child.nsProp( "name", None )
self.category_dict[ temp_name ] = [cat_name, cat_number]
if self.functions_by_name.has_key( func_name ):
func = self.functions_by_name[ func_name ]
func.process_element( child )
else:
func = self.factory.create_item( "function", child, self )
self.functions_by_name[ func_name ] = func
if func.offset >= self.next_offset:
self.next_offset = func.offset + 1
elif child.name == "enum":
enum = self.factory.create_item( "enum", child, self )
self.enums_by_name[ enum.name ] = enum
elif child.name == "type":
t = self.factory.create_item( "type", child, self )
self.types_by_name[ "GL" + t.name ] = t
child = child.next
return
def functionIterateByCategory(self, cat = None):
"""Iterate over functions by category.
If cat is None, all known functions are iterated in category
order. See classify_category for details of the ordering.
Within a category, functions are sorted by name. If cat is
not None, then only functions in that category are iterated.
"""
lists = [{}, {}, {}, {}]
for func in self.functionIterateAll():
[cat_name, cat_number] = self.category_dict[func.name]
if (cat == None) or (cat == cat_name):
[func_cat_type, key] = classify_category(cat_name, cat_number)
if not lists[func_cat_type].has_key(key):
lists[func_cat_type][key] = {}
lists[func_cat_type][key][func.name] = func
functions = []
for func_cat_type in range(0,4):
keys = lists[func_cat_type].keys()
keys.sort()
for key in keys:
names = lists[func_cat_type][key].keys()
names.sort()
for name in names:
functions.append(lists[func_cat_type][key][name])
return functions.__iter__()
def functionIterateByOffset(self):
max_offset = -1
for func in self.functions_by_name.itervalues():
if func.offset > max_offset:
max_offset = func.offset
temp = [None for i in range(0, max_offset + 1)]
for func in self.functions_by_name.itervalues():
if func.offset != -1:
temp[ func.offset ] = func
list = []
for i in range(0, max_offset + 1):
if temp[i]:
list.append(temp[i])
        return list.__iter__()
def functionIterateAll(self):
return self.functions_by_name.itervalues()
def enumIterateByName(self):
keys = self.enums_by_name.keys()
keys.sort()
list = []
for enum in keys:
list.append( self.enums_by_name[ enum ] )
return list.__iter__()
def categoryIterate(self):
"""Iterate over categories.
Iterate over all known categories in the order specified by
classify_category. Each iterated value is a tuple of the
name and number (which may be None) of the category.
"""
list = []
for cat_type in range(0,4):
keys = self.categories[cat_type].keys()
keys.sort()
for key in keys:
list.append(self.categories[cat_type][key])
return list.__iter__()
def get_category_for_name( self, name ):
if self.category_dict.has_key(name):
return self.category_dict[name]
else:
return ["<unknown category>", None]
def typeIterate(self):
return self.types_by_name.itervalues()
def find_type( self, type_name ):
if type_name in self.types_by_name:
return self.types_by_name[ type_name ].type_expr
else:
print "Unable to find base type matching \"%s\"." % (type_name)
return None
|
|
from unittest import TestCase
import filters as f
from filters.test import BaseFilterTestCase
from iota import Address, Iota, AsyncIota
from iota.adapter import MockAdapter, async_return
from iota.commands.extended.is_reattachable import IsReattachableCommand
from test import patch, MagicMock, async_test
class IsReattachableRequestFilterTestCase(BaseFilterTestCase):
filter_type = IsReattachableCommand(MockAdapter()).get_request_filter
skip_value_check = True
def setUp(self):
super(IsReattachableRequestFilterTestCase, self).setUp()
# Define a few valid values that we can reuse across tests.
self.address_1 = (
'TESTVALUE9DONTUSEINPRODUCTION99999EKJZZT'
'SOGJOUNVEWLDPKGTGAOIZIPMGBLHC9LMQNHLGXGYX'
)
self.address_2 = (
'TESTVALUE9DONTUSEINPRODUCTION99999FDCDTZ'
'ZWLL9MYGUTLSYVSIFJ9NGALTRMCQVIIOVEQOITYTE'
)
def test_pass_happy_path(self):
"""
Filter for list of valid string addresses
"""
request = {
# Raw trytes are extracted to match the IRI's JSON protocol.
'addresses': [
self.address_1,
Address(self.address_2)
],
}
filter_ = self._filter(request)
self.assertFilterPasses(filter_)
self.assertDictEqual(
filter_.cleaned_data,
{
'addresses': [
str(Address(self.address_1)),
str(Address(self.address_2))
],
},
)
def test_pass_compatible_types(self):
"""
The incoming request contains values that can be converted to the
expected types.
"""
request = {
'addresses': [
Address(self.address_1),
bytearray(self.address_2.encode('ascii')),
],
}
filter_ = self._filter(request)
self.assertFilterPasses(filter_)
self.assertDictEqual(
filter_.cleaned_data,
{
'addresses': [self.address_1, self.address_2],
},
)
def test_pass_incompatible_types(self):
"""
The incoming request contains values that can NOT be converted to the
expected types.
"""
request = {
'addresses': [
1234234,
False
],
}
self.assertFilterErrors(
request,
{
'addresses.0': [f.Type.CODE_WRONG_TYPE],
'addresses.1': [f.Type.CODE_WRONG_TYPE]
},
)
def test_fail_empty(self):
"""
The incoming request is empty.
"""
self.assertFilterErrors(
{},
{
'addresses': [f.FilterMapper.CODE_MISSING_KEY],
},
)
def test_fail_single_address(self):
"""
The incoming request contains a single address
"""
request = {
'addresses': Address(self.address_1)
}
self.assertFilterErrors(
request,
{
'addresses': [f.Type.CODE_WRONG_TYPE],
}
)
class IsReattachableResponseFilterTestCase(BaseFilterTestCase):
filter_type = IsReattachableCommand(MockAdapter()).get_response_filter
skip_value_check = True
def setUp(self):
super(IsReattachableResponseFilterTestCase, self).setUp()
# Define a few valid values that we can reuse across tests.
self.addresses_1 = (
'TESTVALUE9DONTUSEINPRODUCTION99999EKJZZT'
'SOGJOUNVEWLDPKGTGAOIZIPMGBLHC9LMQNHLGXGYX'
)
def test_pass_happy_path(self):
"""
Typical ``IsReattachable`` request.
"""
response = {
'reattachable': [True, False]
}
filter_ = self._filter(response)
self.assertFilterPasses(filter_)
self.assertDictEqual(filter_.cleaned_data, response)
def test_fail_empty(self):
"""
The incoming response is empty.
"""
self.assertFilterErrors(
{},
{
'reattachable': [f.Required.CODE_EMPTY],
},
)
def test_pass_incompatible_types(self):
"""
The response contains values that can NOT be converted to the
expected types.
"""
request = {
'reattachable': [
1234234,
b'',
'test'
],
}
self.assertFilterErrors(
request,
{
'reattachable.0': [f.Type.CODE_WRONG_TYPE],
'reattachable.1': [f.Type.CODE_WRONG_TYPE],
'reattachable.2': [f.Type.CODE_WRONG_TYPE]
},
)
class IsReattachableCommandTestCase(TestCase):
def setUp(self):
super(IsReattachableCommandTestCase, self).setUp()
self.adapter = MockAdapter()
def test_wireup(self):
"""
Verify that the command is wired up correctly. (sync)
    The API method indeed calls the appropriate command.
"""
with patch('iota.commands.extended.is_reattachable.IsReattachableCommand.__call__',
MagicMock(return_value=async_return('You found me!'))
) as mocked_command:
api = Iota(self.adapter)
# Don't need to call with proper args here.
response = api.is_reattachable('addresses')
self.assertTrue(mocked_command.called)
self.assertEqual(
response,
'You found me!'
)
@async_test
async def test_wireup_async(self):
"""
Verify that the command is wired up correctly. (async)
    The API method indeed calls the appropriate command.
"""
with patch('iota.commands.extended.is_reattachable.IsReattachableCommand.__call__',
MagicMock(return_value=async_return('You found me!'))
) as mocked_command:
api = AsyncIota(self.adapter)
# Don't need to call with proper args here.
response = await api.is_reattachable('addresses')
self.assertTrue(mocked_command.called)
self.assertEqual(
response,
'You found me!'
)
|
|
#!/usr/bin/env python
"""
Asset Management - Deployments: Support for cache related functions.
"""
__author__ = 'Edna Donoughe'
from copy import deepcopy
from ooiservices.app import cache
from ooiservices.app.uframe.config import get_cache_timeout
from ooiservices.app.uframe.uframe_tools import _get_id_by_uid
from ooiservices.app.uframe.common_tools import is_instrument
#CACHE_TIMEOUT = 172800
def refresh_deployment_cache(id, deployment, action, mid, nid, sid):
""" Perform deployment cache refresh.
"""
try:
# Refresh deployment cache.
deployment_cache_refresh(id, deployment, action, mid, nid, sid)
except Exception as err:
message = str(err)
raise Exception(message)
def deployment_cache_refresh(id, deployment, action, mid, nid, sid):
""" Add an deployment to 'rd_assets' for deployment cache.
{
"assetUid" : null,
"rd": "CE01ISSP-SP001-02-DOSTAJ000",
"dataSource": "UI:user=edna",
"deployCruiseInfo": null,
"deployedBy": "Test engineer",
"deploymentNumber": 3027,
"editPhase" : "OPERATIONAL",
"eventName": "Coastal Pioneer:Central Surface Piercing Profiler Mooring",
"eventStartTime": 1506434340000,
"eventStopTime": 1509034390000,
"eventType": "DEPLOYMENT",
"inductiveId" : null,
"ingestInfo": null,
"depth": 0.0,
"longitude" : -124.09567,
"latitude" : 44.66415,
"orbitRadius": 0.0,
"mooring_uid": "N00262",
"node_uid": "N00122",
"notes" : null,
"recoverCruiseInfo": null,
"recoveredBy": "Test engineer",
"sensor_uid": "N00580",
"versionNumber": 3027
}
"""
try:
mooring_id, node_id, sensor_id, eventId, deployment_rd, deploymentNumber, location, startTime, stopTime = \
get_deployment_cache_info(deployment)
if not is_instrument(deployment_rd):
message = 'The reference designator provided for refreshing deployment cache is not an instrument.'
raise Exception(message)
mooring_rd, node, sensor = deployment_rd.split('-', 2)
node_rd = '-'.join([mooring_rd, node])
sensor_rd = deployment_rd
rds = [mooring_rd, node_rd, sensor_rd]
for rd in rds:
# Add deployment to deployment cache ('rd_assets')
deployment_cache = cache.get('rd_assets')
if deployment_cache:
deployments_dict = deployment_cache
if isinstance(deployments_dict, dict):
# Determine if rd in deployments dictionary.
if rd in deployments_dict:
work = deepcopy(deployments_dict[rd])
# If deployment number in dictionary, verify asset ids are represented.
if deploymentNumber in work:
#------------
# Update deployment asset ids with mooring, node and sensor id information
# Mooring
if mooring_id is not None:
target_asset_type = 'mooring'
if mooring_id not in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].append(mooring_id)
if target_asset_type in work[deploymentNumber]['asset_ids_by_type']:
if mooring_id not in work[deploymentNumber]['asset_ids_by_type'][target_asset_type]:
work[deploymentNumber]['asset_ids_by_type'][target_asset_type].append(mooring_id)
else:
work[deploymentNumber]['asset_ids_by_type'][target_asset_type] = [mooring_id]
# Main dictionary asset_ids list
if mooring_id not in work['asset_ids']:
work['asset_ids'].append(mooring_id)
if mooring_id not in work['asset_ids_by_type'][target_asset_type]:
work['asset_ids_by_type'][target_asset_type].append(mooring_id)
else:
# Mooring id is None or empty...use original mooring id provided (mid).
# If original mooring id (mid) is None and current setting is None, no change (go on).
# else we are removing this mooring id from deployment map.
if mid is not None:
target_asset_type = 'mooring'
if mid in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].remove(mid)
if target_asset_type in work[deploymentNumber]['asset_ids_by_type']:
if mid in work[deploymentNumber]['asset_ids_by_type'][target_asset_type]:
work[deploymentNumber]['asset_ids_by_type'][target_asset_type].remove(mid)
# Main dictionary asset_ids list
if mid in work['asset_ids']:
work['asset_ids'].remove(mid)
if mid in work['asset_ids_by_type'][target_asset_type]:
work['asset_ids_by_type'][target_asset_type].remove(mid)
# Node
if node_id is not None:
target_asset_type = 'node'
if node_id not in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].append(node_id)
if target_asset_type in work[deploymentNumber]['asset_ids_by_type']:
if node_id not in work[deploymentNumber]['asset_ids_by_type'][target_asset_type]:
work[deploymentNumber]['asset_ids_by_type'][target_asset_type].append(node_id)
else:
work[deploymentNumber]['asset_ids_by_type'][target_asset_type] = [node_id]
# Main dictionary asset_ids list
if node_id not in work['asset_ids']:
work['asset_ids'].append(node_id)
if node_id not in work['asset_ids_by_type'][target_asset_type]:
work['asset_ids_by_type'][target_asset_type].append(node_id)
else:
# Node id is None or empty...use original node id provided (nid).
# If original node id (nid) is None and current setting is None, no change (go on).
# else we are removing this node id from deployment map.
if nid is not None:
target_asset_type = 'node'
if nid in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].remove(nid)
if target_asset_type in work[deploymentNumber]['asset_ids_by_type']:
if nid in work[deploymentNumber]['asset_ids_by_type'][target_asset_type]:
work[deploymentNumber]['asset_ids_by_type'][target_asset_type].remove(nid)
# Main dictionary asset_ids list
if nid in work['asset_ids']:
work['asset_ids'].remove(nid)
if nid in work['asset_ids_by_type'][target_asset_type]:
work['asset_ids_by_type'][target_asset_type].remove(nid)
# Sensor
if sensor_id is not None:
target_asset_type = 'sensor'
if sensor_id not in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].append(sensor_id)
if target_asset_type in work[deploymentNumber]['asset_ids_by_type']:
if sensor_id not in work[deploymentNumber]['asset_ids_by_type'][target_asset_type]:
work[deploymentNumber]['asset_ids_by_type'][target_asset_type].append(sensor_id)
else:
work[deploymentNumber]['asset_ids_by_type'][target_asset_type] = [sensor_id]
#- - - - - - - - - - - - - - - - - - - - - - - - - -
# Main dictionary asset_ids list
#- - - - - - - - - - - - - - - - - - - - - - - - - -
if sensor_id not in work['asset_ids']:
work['asset_ids'].append(sensor_id)
if sensor_id not in work['asset_ids_by_type'][target_asset_type]:
work['asset_ids_by_type'][target_asset_type].append(sensor_id)
else:
                                # Sensor id is None or empty...use original sensor id provided (sid).
# If original sensor id (sid) is None and current setting is None, no change (go on).
# else we are removing this sensor id from deployment map.
if sid is not None:
target_asset_type = 'sensor'
if sid in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].remove(sid)
if target_asset_type in work[deploymentNumber]['asset_ids_by_type']:
if sid in work[deploymentNumber]['asset_ids_by_type'][target_asset_type]:
work[deploymentNumber]['asset_ids_by_type'][target_asset_type].remove(sid)
# Main dictionary asset_ids list
if sid in work['asset_ids']:
work['asset_ids'].remove(sid)
if sid in work['asset_ids_by_type'][target_asset_type]:
work['asset_ids_by_type'][target_asset_type].remove(sid)
# Common elements in work[deploymentNumber]
work[deploymentNumber]['beginDT'] = startTime
work[deploymentNumber]['endDT'] = stopTime
work[deploymentNumber]['eventId'] = eventId
work[deploymentNumber]['location'] = location
                            # deploymentNumber is in work, so it should also be in work['deployments']; verify and update if not.
if deploymentNumber not in work['deployments']:
#print '\n *** Added deploymentNumber %d for rd %s...' % (deploymentNumber, rd)
work['deployments'].append(deploymentNumber)
if work['deployments']:
work['deployments'].sort(reverse=True)
#------------
else:
work['current_deployment'] = deploymentNumber
# Update deployment entry for rd.
deployments_dict[rd] = work
else:
new_deployment = {}
new_deployment['beginDT'] = startTime
new_deployment['endDT'] = stopTime
new_deployment['eventId'] = eventId
new_deployment['location'] = location
new_deployment['asset_ids_by_type'] = {'mooring': [], 'node': [], 'sensor': []}
new_deployment['asset_ids'] = []
work[deploymentNumber] = new_deployment
if mooring_id is not None:
if mooring_id not in work['asset_ids']:
work['asset_ids'].append(mooring_id)
if mooring_id not in work['asset_ids_by_type']['mooring']:
work['asset_ids_by_type']['mooring'].append(mooring_id)
if mooring_id not in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].append(mooring_id)
if mooring_id not in work[deploymentNumber]['asset_ids_by_type']['mooring']:
work[deploymentNumber]['asset_ids_by_type']['mooring'].append(mooring_id)
if node_id is not None:
if node_id not in work['asset_ids']:
work['asset_ids'].append(node_id)
if node_id not in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].append(node_id)
if node_id not in work['asset_ids_by_type']['node']:
work['asset_ids_by_type']['node'].append(node_id)
if node_id not in work[deploymentNumber]['asset_ids_by_type']['node']:
work[deploymentNumber]['asset_ids_by_type']['node'].append(node_id)
if sensor_id is not None:
if sensor_id not in work['asset_ids']:
work['asset_ids'].append(sensor_id)
if sensor_id not in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].append(sensor_id)
if sensor_id not in work['asset_ids_by_type']['sensor']:
work['asset_ids_by_type']['sensor'].append(sensor_id)
if sensor_id not in work[deploymentNumber]['asset_ids_by_type']['sensor']:
work[deploymentNumber]['asset_ids_by_type']['sensor'].append(sensor_id)
if deploymentNumber not in work['deployments']:
work['deployments'].append(deploymentNumber)
deployments_list = work['deployments']
deployments_list.sort(reverse=True)
current_deployment_number = deployments_list[0]
work['current_deployment'] = current_deployment_number
#---------
deployments_dict[rd] = work
# Build dictionary for rd, then add to rd_assets
else:
work = {}
work['current_deployment'] = deploymentNumber
work['deployments'] = [deploymentNumber]
work[deploymentNumber] = {}
work[deploymentNumber]['beginDT'] = startTime
work[deploymentNumber]['endDT'] = stopTime
work[deploymentNumber]['eventId'] = eventId
work[deploymentNumber]['location'] = location
work[deploymentNumber]['current_deployment'] = deploymentNumber
work[deploymentNumber]['asset_ids_by_type'] = {'mooring': [], 'node': [], 'sensor': []}
work[deploymentNumber]['asset_ids'] = []
work['asset_ids'] = []
work['asset_ids_by_type'] = {'mooring': [], 'node': [], 'sensor': []}
if mooring_id is not None:
work['asset_ids'].append(mooring_id)
work['asset_ids_by_type']['mooring'].append(mooring_id)
work[deploymentNumber]['asset_ids'].append(mooring_id)
work[deploymentNumber]['asset_ids_by_type']['mooring'].append(mooring_id)
if node_id is not None:
if node_id not in work['asset_ids']:
work['asset_ids'].append(node_id)
if node_id not in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].append(node_id)
work['asset_ids_by_type']['node'].append(node_id)
work[deploymentNumber]['asset_ids_by_type']['node'].append(node_id)
if sensor_id is not None:
if sensor_id not in work['asset_ids']:
work['asset_ids'].append(sensor_id)
if sensor_id not in work[deploymentNumber]['asset_ids']:
work[deploymentNumber]['asset_ids'].append(sensor_id)
work['asset_ids_by_type']['sensor'].append(sensor_id)
work[deploymentNumber]['asset_ids_by_type']['sensor'].append(sensor_id)
deployments_dict[rd] = work
cache.set('rd_assets', deployments_dict, timeout=get_cache_timeout())
return
except Exception as err:
message = str(err)
raise Exception(message)
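# Illustrative only (shape inferred from deployment_cache_refresh above, not an
# authoritative schema; asset ids, event id and times are made up): a single
# 'rd_assets' entry for one reference designator with one deployment number.
_EXAMPLE_RD_ASSETS_ENTRY = {
    'current_deployment': 3027,
    'deployments': [3027],
    'asset_ids': [100, 200, 300],
    'asset_ids_by_type': {'mooring': [100], 'node': [200], 'sensor': [300]},
    3027: {
        'beginDT': 1506434340000,
        'endDT': 1509034390000,
        'eventId': 12345,
        'location': None,
        'asset_ids': [100, 200, 300],
        'asset_ids_by_type': {'mooring': [100], 'node': [200], 'sensor': [300]},
    },
}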
def get_deployment_cache_info(deployment):
""" Get bundle of information used to update deployment cache.
"""
try:
startTime = None
stopTime = None
#- - - - - - - - - - - - - - - - - - - -
# Get deployment event id.
#- - - - - - - - - - - - - - - - - - - -
if 'eventId' not in deployment:
message = 'The event id was not provided in deployment for cache refresh.'
raise Exception(message)
else:
eventId = deployment['eventId']
if eventId is None:
message = 'The event id is null, invalid deployment for cache refresh.'
raise Exception(message)
#- - - - - - - - - - - - - - - - - - - -
# Get deployment reference designator.
#- - - - - - - - - - - - - - - - - - - -
if 'rd' not in deployment:
message = 'The reference designator was not provided in deployment for cache refresh.'
raise Exception(message)
else:
rd = deployment['rd']
if rd is None:
message = 'The reference designator is null, invalid deployment for cache refresh.'
raise Exception(message)
#- - - - - - - - - - - - - - - - - - - -
# Get deploymentNumber.
#- - - - - - - - - - - - - - - - - - - -
if 'deploymentNumber' not in deployment:
message = 'The deployment number was not provided in deployment for cache refresh.'
raise Exception(message)
else:
deploymentNumber = deployment['deploymentNumber']
if deploymentNumber is None:
message = 'The deployment number is null, invalid deployment for cache refresh.'
raise Exception(message)
#- - - - - - - - - - - - - - - - - - - -
# Get location, startTime, stopTime.
#- - - - - - - - - - - - - - - - - - - -
location = get_location_dictionary(deployment)
if 'startTime' in deployment:
startTime = deployment['startTime']
if 'stopTime' in deployment:
stopTime = deployment['stopTime']
#- - - - - - - - - - - - - - - - - - - -
# Get asset ids by type.
#- - - - - - - - - - - - - - - - - - - -
mooring_id = None
node_id = None
sensor_id = None
# Get mooring_id using mooring_uid.
if 'mooring_uid' in deployment:
if not deployment['mooring_uid']:
deployment['mooring_uid'] = None
target_asset_type = 'Mooring'
if deployment['mooring_uid'] is not None:
                mooring_id, asset_type = _get_id_by_uid(deployment['mooring_uid'])
                if mooring_id is None:
                    message = 'The mooring_uid \'%s\' is invalid, it failed to return an asset id from uframe.' % \
                              deployment['mooring_uid']
                    raise Exception(message)
                if asset_type != target_asset_type:
                    message = 'The asset with uid \'%s\', asset id %d is a %s asset, not a \'%s\'.' % \
                              (deployment['mooring_uid'], mooring_id, asset_type, target_asset_type)
                    raise Exception(message)
# Get node_id using node_uid.
if 'node_uid' in deployment:
if not deployment['node_uid']:
deployment['node_uid'] = None
target_asset_type = 'Node'
if deployment['node_uid'] is not None:
node_id, asset_type = _get_id_by_uid(deployment['node_uid'])
if node_id is None:
                    message = 'The node_uid \'%s\' is invalid, it failed to return an asset id from uframe.' % \
                              deployment['node_uid']
raise Exception(message)
if asset_type != target_asset_type:
message = 'The asset with uid \'%s\', asset id %d is a %s asset, not a \'%s\'.' % \
(deployment['node_uid'], node_id, asset_type, target_asset_type)
raise Exception(message)
# Get sensor_id using sensor_uid.
if 'sensor_uid' in deployment:
if not deployment['sensor_uid']:
deployment['sensor_uid'] = None
target_asset_type = 'Sensor'
if deployment['sensor_uid'] is not None:
sensor_id, asset_type = _get_id_by_uid(deployment['sensor_uid'])
if sensor_id is None:
                    message = 'The sensor_uid \'%s\' is invalid, it failed to return an asset id from uframe.' % \
deployment['sensor_uid']
raise Exception(message)
if asset_type != target_asset_type:
message = 'The asset with uid \'%s\', asset id %d is a %s asset, not a \'%s\'.' % \
(deployment['sensor_uid'], sensor_id, asset_type, target_asset_type)
raise Exception(message)
return mooring_id, node_id, sensor_id, eventId, rd, deploymentNumber, location, startTime, stopTime
except Exception as err:
message = str(err)
raise Exception(message)
def get_location_dictionary(deployment):
""" Construct location dictionary from ui deployment information.
"""
try:
have_location_dict = False
latitude = None
longitude = None
location = None
depth = None
orbitRadius = None
if 'depth' in deployment:
depth = deployment['depth']
if depth is None:
depth = 0.0
else:
have_location_dict = True
if 'orbitRadius' in deployment:
orbitRadius = deployment['orbitRadius']
if orbitRadius is None:
orbitRadius = 0.0
else:
have_location_dict = True
if 'latitude' in deployment:
latitude = deployment['latitude']
if 'longitude' in deployment:
longitude = deployment['longitude']
if latitude is not None and longitude is not None:
location = [longitude, latitude]
have_location_dict = True
else:
if latitude is None:
latitude = 0.0
if longitude is None:
longitude = 0.0
if have_location_dict:
location_dict = {}
location_dict['latitude'] = latitude
location_dict['longitude'] = longitude
location_dict['location'] = location
location_dict['depth'] = depth
location_dict['orbitRadius'] = orbitRadius
else:
location_dict = None
return location_dict
except Exception as err:
message = str(err)
raise Exception(message)
|
|
"""Test the Xiaomi Aqara config flow."""
from socket import gaierror
import pytest
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.components.xiaomi_aqara import config_flow, const
from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME, CONF_PORT
from tests.async_mock import Mock, patch
ZEROCONF_NAME = "name"
ZEROCONF_PROP = "properties"
ZEROCONF_MAC = "mac"
TEST_HOST = "1.2.3.4"
TEST_HOST_2 = "5.6.7.8"
TEST_KEY = "1234567890123456"
TEST_PORT = 1234
TEST_NAME = "Test_Aqara_Gateway"
TEST_SID = "abcdefghijkl"
TEST_PROTOCOL = "1.1.1"
TEST_MAC = "ab:cd:ef:gh:ij:kl"
TEST_GATEWAY_ID = TEST_MAC
TEST_ZEROCONF_NAME = "lumi-gateway-v3_miio12345678._miio._udp.local."
@pytest.fixture(name="xiaomi_aqara", autouse=True)
def xiaomi_aqara_fixture():
"""Mock xiaomi_aqara discovery and entry setup."""
mock_gateway_discovery = get_mock_discovery([TEST_HOST])
with patch(
"homeassistant.components.xiaomi_aqara.config_flow.XiaomiGatewayDiscovery",
return_value=mock_gateway_discovery,
), patch(
"homeassistant.components.xiaomi_aqara.async_setup_entry", return_value=True
):
yield
def get_mock_discovery(host_list, invalid_interface=False, invalid_key=False):
"""Return a mock gateway info instance."""
gateway_discovery = Mock()
gateway_dict = {}
for host in host_list:
gateway = Mock()
gateway.ip_adress = host
gateway.port = TEST_PORT
gateway.sid = TEST_SID
gateway.proto = TEST_PROTOCOL
if invalid_key:
gateway.write_to_hub = Mock(return_value=False)
gateway_dict[host] = gateway
gateway_discovery.gateways = gateway_dict
if invalid_interface:
gateway_discovery.discover_gateways = Mock(side_effect=gaierror)
return gateway_discovery
async def test_config_flow_user_success(hass):
"""Test a successful config flow initialized by the user."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE},
)
assert result["type"] == "form"
assert result["step_id"] == "settings"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_KEY: TEST_KEY, CONF_NAME: TEST_NAME},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_PORT: TEST_PORT,
CONF_MAC: TEST_MAC,
const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE,
const.CONF_PROTOCOL: TEST_PROTOCOL,
const.CONF_KEY: TEST_KEY,
const.CONF_SID: TEST_SID,
}
async def test_config_flow_user_multiple_success(hass):
"""Test a successful config flow initialized by the user with multiple gateways discoverd."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
mock_gateway_discovery = get_mock_discovery([TEST_HOST, TEST_HOST_2])
with patch(
"homeassistant.components.xiaomi_aqara.config_flow.XiaomiGatewayDiscovery",
return_value=mock_gateway_discovery,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE},
)
assert result["type"] == "form"
assert result["step_id"] == "select"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"select_ip": TEST_HOST_2},
)
assert result["type"] == "form"
assert result["step_id"] == "settings"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_KEY: TEST_KEY, CONF_NAME: TEST_NAME},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST_2,
CONF_PORT: TEST_PORT,
CONF_MAC: TEST_MAC,
const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE,
const.CONF_PROTOCOL: TEST_PROTOCOL,
const.CONF_KEY: TEST_KEY,
const.CONF_SID: TEST_SID,
}
async def test_config_flow_user_no_key_success(hass):
"""Test a successful config flow initialized by the user without a key."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE},
)
assert result["type"] == "form"
assert result["step_id"] == "settings"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_NAME: TEST_NAME},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_PORT: TEST_PORT,
CONF_MAC: TEST_MAC,
const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE,
const.CONF_PROTOCOL: TEST_PROTOCOL,
const.CONF_KEY: None,
const.CONF_SID: TEST_SID,
}
async def test_config_flow_user_discovery_error(hass):
"""Test a failed config flow initialized by the user with no gateways discoverd."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
mock_gateway_discovery = get_mock_discovery([])
with patch(
"homeassistant.components.xiaomi_aqara.config_flow.XiaomiGatewayDiscovery",
return_value=mock_gateway_discovery,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "discovery_error"}
async def test_config_flow_user_invalid_interface(hass):
"""Test a failed config flow initialized by the user with an invalid interface."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
mock_gateway_discovery = get_mock_discovery([], invalid_interface=True)
with patch(
"homeassistant.components.xiaomi_aqara.config_flow.XiaomiGatewayDiscovery",
return_value=mock_gateway_discovery,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {const.CONF_INTERFACE: "invalid_interface"}
async def test_config_flow_user_invalid_key(hass):
"""Test a failed config flow initialized by the user with an invalid key."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
mock_gateway_discovery = get_mock_discovery([TEST_HOST], invalid_key=True)
with patch(
"homeassistant.components.xiaomi_aqara.config_flow.XiaomiGatewayDiscovery",
return_value=mock_gateway_discovery,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE},
)
assert result["type"] == "form"
assert result["step_id"] == "settings"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_KEY: TEST_KEY, CONF_NAME: TEST_NAME},
)
assert result["type"] == "form"
assert result["step_id"] == "settings"
assert result["errors"] == {const.CONF_KEY: "invalid_key"}
async def test_zeroconf_success(hass):
"""Test a successful zeroconf discovery of a xiaomi aqara gateway."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
zeroconf.ATTR_HOST: TEST_HOST,
ZEROCONF_NAME: TEST_ZEROCONF_NAME,
ZEROCONF_PROP: {ZEROCONF_MAC: TEST_MAC},
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE},
)
assert result["type"] == "form"
assert result["step_id"] == "settings"
assert result["errors"] == {}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_KEY: TEST_KEY, CONF_NAME: TEST_NAME},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_PORT: TEST_PORT,
CONF_MAC: TEST_MAC,
const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE,
const.CONF_PROTOCOL: TEST_PROTOCOL,
const.CONF_KEY: TEST_KEY,
const.CONF_SID: TEST_SID,
}
async def test_zeroconf_missing_data(hass):
"""Test a failed zeroconf discovery because of missing data."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={zeroconf.ATTR_HOST: TEST_HOST, ZEROCONF_NAME: TEST_ZEROCONF_NAME},
)
assert result["type"] == "abort"
assert result["reason"] == "not_xiaomi_aqara"
async def test_zeroconf_unknown_device(hass):
"""Test a failed zeroconf discovery because of a unknown device."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
zeroconf.ATTR_HOST: TEST_HOST,
ZEROCONF_NAME: "not-a-xiaomi-aqara-gateway",
ZEROCONF_PROP: {ZEROCONF_MAC: TEST_MAC},
},
)
assert result["type"] == "abort"
assert result["reason"] == "not_xiaomi_aqara"
async def test_zeroconf_not_found_error(hass):
"""Test a failed zeroconf discovery because the correct gateway could not be found."""
result = await hass.config_entries.flow.async_init(
const.DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
zeroconf.ATTR_HOST: TEST_HOST,
ZEROCONF_NAME: TEST_ZEROCONF_NAME,
ZEROCONF_PROP: {ZEROCONF_MAC: TEST_MAC},
},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
mock_gateway_discovery = get_mock_discovery([TEST_HOST_2])
with patch(
"homeassistant.components.xiaomi_aqara.config_flow.XiaomiGatewayDiscovery",
return_value=mock_gateway_discovery,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {const.CONF_INTERFACE: config_flow.DEFAULT_INTERFACE},
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "not_found_error"}
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility functions used across api code."""
import logging
import subprocess
from decorator import decorator
try:
from google.protobuf.descriptor import FieldDescriptor
#Map descriptor.CPPTYPE -> python type.
_python_to_cpp_types = {
long: ('int32', 'int64', 'uint32', 'uint64'),
float: ('double', 'float'),
bool: ('bool',),
str: ('string',),
}
cpp_type_to_python = dict(
(getattr(FieldDescriptor, 'CPPTYPE_' + cpp.upper()), python)
for (python, cpplist) in _python_to_cpp_types.items()
for cpp in cpplist
)
except ImportError:
    # protobuf is optional here; without it, pb_set() cannot be used.
    pass
from gmusicapi import __version__
log = logging.getLogger(__name__)
root_logger_name = "gmusicapi"
log_filename = "gmusicapi.log"
#set to True after configure_debug_logging is called to prevent
# setting up more than once
log_already_configured_flag = '_gmusicapi_debug_logging_setup'
def configure_debug_logging():
"""Warnings and above to terminal, below to gmusicapi.log.
Output includes line number."""
root_logger = logging.getLogger('gmusicapi')
if not getattr(root_logger, 'log_already_configured_flag', None):
root_logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_filename)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
root_logger.addHandler(fh)
root_logger.addHandler(ch)
#print out startup message without verbose formatting
root_logger.info("!-- begin debug log --!")
root_logger.info("version: " + __version__)
formatter = logging.Formatter(
'%(asctime)s - %(name)s (%(lineno)s) [%(levelname)s]: %(message)s'
)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
setattr(root_logger, 'log_already_configured_flag', True)
def pb_set(msg, field_name, val):
"""Return True and set val to field_name in msg if the assignment
is type-compatible, else return False.
val will be coerced to a proper type if needed.
:param msg: an instance of a protobuf.message
:param field_name:
    :param val:
"""
#Find the proper type.
field_desc = msg.DESCRIPTOR.fields_by_name[field_name]
proper_type = cpp_type_to_python[field_desc.cpp_type]
#Try with the given type first.
#Their set hooks will automatically coerce.
try_types = (type(val), proper_type)
for t in try_types:
log.debug("attempt %s.%s = %s(%r)", msg.__class__.__name__, field_name, t, val)
try:
setattr(msg, field_name, t(val))
log.debug("! success")
break
except (TypeError, ValueError):
log.debug("X failure")
else:
return False # no assignments stuck
return True
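# A minimal usage sketch (illustrative; ``UploadMetadata`` is a hypothetical
# generated protobuf message class with a string field named 'title', not part
# of this module):
#
#   metadata = UploadMetadata()
#   if not pb_set(metadata, 'title', u'My Track'):
#       log.warning("could not coerce value for field 'title'")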
def transcode_to_mp3(filepath, quality=3, slice_start=None, slice_duration=None):
"""Return the bytestring result of transcoding the file at *filepath* to mp3.
An ID3 header is not included in the result.
:param filepath: location of file
:param quality: if int, pass to avconv -qscale. if string, pass to avconv -ab
-qscale roughly corresponds to libmp3lame -V0, -V1...
:param slice_start: (optional) transcode a slice, starting at this many seconds
:param slice_duration: (optional) when used with slice_start, the number of seconds in the slice
Raise IOError on transcoding problems, or ValueError on param problems.
"""
err_output = None
cmd = ['avconv', '-i', filepath]
if slice_duration is not None:
cmd.extend(['-t', str(slice_duration)])
if slice_start is not None:
cmd.extend(['-ss', str(slice_start)])
if isinstance(quality, int):
cmd.extend(['-qscale', str(quality)])
elif isinstance(quality, basestring):
cmd.extend(['-ab', quality])
else:
raise ValueError("quality must be int or string, but received %r" % quality)
cmd.extend(['-f', 's16le', # don't output id3 headers
'-c', 'libmp3lame',
'pipe:1'])
log.debug('running transcode command %r', cmd)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
audio_out, err_output = proc.communicate()
if proc.returncode != 0:
err_output = ("(return code: %r)\n" % proc.returncode) + err_output
raise IOError # handle errors in except
except (OSError, IOError) as e:
log.exception('transcoding failure')
err_msg = "transcoding failed: %s. " % e
if err_output is not None:
err_msg += "stderr: '%s'" % err_output
log.debug('full failure output: %s', err_output)
raise IOError(err_msg)
else:
return audio_out
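# A minimal usage sketch (illustrative; assumes avconv is on the PATH and that
# the input path below exists):
#
#   mp3_bytes = transcode_to_mp3('/tmp/example.flac', quality='320k',
#                                slice_start=0, slice_duration=30)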
def truncate(x, max_els=100, recurse_levels=0):
"""Return a 'shorter' truncated x of the same type, useful for logging.
recurse_levels is only valid for homogeneous lists/tuples.
max_els ignored for song dictionaries."""
#Coerce tuple to list to ease truncation.
is_tuple = False
if isinstance(x, tuple):
is_tuple = True
x = list(x)
try:
if len(x) > max_els:
if isinstance(x, basestring):
return x[:max_els] + '...'
if isinstance(x, dict):
if 'id' in x and 'titleNorm' in x:
#assume to be a song dict
trunc = dict((k, x.get(k)) for k in ['title', 'artist', 'album'])
trunc['...'] = '...'
return trunc
else:
return dict(x.items()[:max_els] + [('...', '...')])
if isinstance(x, list):
trunc = x[:max_els] + ['...']
if recurse_levels > 0:
                    trunc = [truncate(e, recurse_levels=recurse_levels - 1) for e in trunc[:-1]]
if is_tuple:
trunc = tuple(trunc)
return trunc
except TypeError:
#does not have len
pass
return x
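# A minimal sketch (illustrative only; values are arbitrary): truncate()
# shortens long containers and strings so they can be logged readably.
def _truncate_example():
    short_list = truncate(range(500), max_els=3)  # -> [0, 1, 2, '...']
    short_str = truncate('x' * 500, max_els=5)    # -> 'xxxxx...'
    return short_list, short_str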
def empty_arg_shortcircuit(return_code='[]', position=1):
"""Decorate a function to shortcircuit and return something immediately if
the length of a positional arg is 0.
:param return_code: (optional) code to exec as the return value - default is a list.
:param position: (optional) the position of the expected list - default is 1.
"""
#The normal pattern when making a collection an optional arg is to use
# a sentinel (like None). Otherwise, you run the risk of the collection
# being mutated - there's only one, not a new one on each call.
#Here we've got multiple things we'd like to
# return, so we can't do that. Rather than make some kind of enum for
# 'accepted return values' I'm just allowing freedom to return anything.
#Less safe? Yes. More convenient? Definitely.
@decorator
def wrapper(function, *args, **kw):
if len(args[position]) == 0:
#avoid polluting our namespace
ns = {}
exec 'retval = ' + return_code in ns
return ns['retval']
else:
return function(*args, **kw)
return wrapper
def accept_singleton(expected_type, position=1):
"""Allows a function expecting a list to accept a single item as well.
The item will be wrapped in a list.
Will not work for nested lists.
:param expected_type: the type of the items in the list
:param position: (optional) the position of the expected list - defaults to 1.
"""
@decorator
def wrapper(function, *args, **kw):
if isinstance(args[position], expected_type):
#args are a tuple, can't assign into them
args = list(args)
args[position] = [args[position]]
args = tuple(args)
return function(*args, **kw)
return wrapper
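# A minimal sketch (illustrative, not part of the public API): how the two
# decorators above compose on an API-style method. ``song_ids`` sits at
# positional index 1 (after self), matching both decorators' default position.
class _SingletonShortcircuitExample(object):
    @accept_singleton(basestring)
    @empty_arg_shortcircuit(return_code='[]')
    def delete_songs(self, song_ids):
        # By this point song_ids is always a non-empty list:
        # a bare string was wrapped, and an empty list short-circuited to [].
        return [str(song_id) for song_id in song_ids]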
#Used to mark a field as unimplemented.
@property
def NotImplementedField(self):
raise NotImplementedError
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.core import util
from telemetry.page import page_set
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
BLINK_PERF_BASE_DIR = os.path.join(util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
SKIPPED_FILE = os.path.join(BLINK_PERF_BASE_DIR, 'Skipped')
def CreatePageSetFromPath(path, skipped_file):
assert os.path.exists(path)
page_urls = []
serving_dirs = set()
def _AddPage(path):
if not path.endswith('.html'):
return
if '../' in open(path, 'r').read():
# If the page looks like it references its parent dir, include it.
serving_dirs.add(os.path.dirname(os.path.dirname(path)))
page_urls.append('file://' + path.replace('\\', '/'))
def _AddDir(dir_path, skipped):
for candidate_path in os.listdir(dir_path):
if candidate_path == 'resources':
continue
candidate_path = os.path.join(dir_path, candidate_path)
if candidate_path.startswith(skipped):
continue
if os.path.isdir(candidate_path):
_AddDir(candidate_path, skipped)
else:
_AddPage(candidate_path)
if os.path.isdir(path):
skipped = []
if os.path.exists(skipped_file):
for line in open(skipped_file, 'r').readlines():
line = line.strip()
if line and not line.startswith('#'):
skipped_path = os.path.join(os.path.dirname(skipped_file), line)
skipped.append(skipped_path.replace('/', os.sep))
_AddDir(path, tuple(skipped))
else:
_AddPage(path)
ps = page_set.PageSet(file_path=os.getcwd()+os.sep,
serving_dirs=serving_dirs)
for url in page_urls:
ps.AddUserStory(page_module.Page(url, ps, ps.base_dir))
return ps
class _BlinkPerfMeasurement(page_test.PageTest):
"""Tuns a blink performance test and reports the results."""
def __init__(self):
super(_BlinkPerfMeasurement, self).__init__(
action_name_to_run='RunPageInteractions')
with open(os.path.join(os.path.dirname(__file__),
'blink_perf.js'), 'r') as f:
self._blink_perf_js = f.read()
def WillNavigateToPage(self, page, tab):
page.script_to_evaluate_on_commit = self._blink_perf_js
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--js-flags=--expose_gc',
'--enable-experimental-web-platform-features',
'--disable-gesture-requirement-for-media-playback'
])
if 'content-shell' in options.browser_type:
options.AppendExtraBrowserArgs('--expose-internals-for-testing')
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression('testRunner.isDone', 600)
log = tab.EvaluateJavaScript('document.getElementById("log").innerHTML')
for line in log.splitlines():
if not line.startswith('values '):
continue
parts = line.split()
values = [float(v.replace(',', '')) for v in parts[1:-1]]
units = parts[-1]
metric = page.display_name.split('.')[0].replace('/', '_')
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, metric, units, values))
break
print log
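# Illustrative note (format inferred from the parsing above, not a spec):
# blink_perf.js appends one summary line per metric to the page's #log element,
# e.g.
#
#   values 12.3, 11.8, 12.1 ms
#
# ValidateAndMeasurePage takes the 'values ...' line, strips the commas and
# reports the numbers as a ListOfScalarValues in the trailing units.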
class _BlinkPerfFullFrameMeasurement(_BlinkPerfMeasurement):
def __init__(self):
super(_BlinkPerfFullFrameMeasurement, self).__init__()
self._blink_perf_js += '\nwindow.fullFrameMeasurement = true;'
def CustomizeBrowserOptions(self, options):
super(_BlinkPerfFullFrameMeasurement, self).CustomizeBrowserOptions(
options)
# Full layout measurement needs content_shell with internals testing API.
assert 'content-shell' in options.browser_type
options.AppendExtraBrowserArgs(['--expose-internals-for-testing'])
class BlinkPerfAnimation(benchmark.Benchmark):
tag = 'animation'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Animation')
return CreatePageSetFromPath(path, SKIPPED_FILE)
class BlinkPerfBindings(benchmark.Benchmark):
tag = 'bindings'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Bindings')
return CreatePageSetFromPath(path, SKIPPED_FILE)
@benchmark.Enabled('content-shell')
class BlinkPerfBlinkGC(benchmark.Benchmark):
tag = 'blink_gc'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'BlinkGC')
return CreatePageSetFromPath(path, SKIPPED_FILE)
class BlinkPerfCSS(benchmark.Benchmark):
tag = 'css'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'CSS')
return CreatePageSetFromPath(path, SKIPPED_FILE)
class BlinkPerfCanvas(benchmark.Benchmark):
tag = 'canvas'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Canvas')
return CreatePageSetFromPath(path, SKIPPED_FILE)
class BlinkPerfDOM(benchmark.Benchmark):
tag = 'dom'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'DOM')
return CreatePageSetFromPath(path, SKIPPED_FILE)
class BlinkPerfEvents(benchmark.Benchmark):
tag = 'events'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Events')
return CreatePageSetFromPath(path, SKIPPED_FILE)
class BlinkPerfLayout(benchmark.Benchmark):
tag = 'layout'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Layout')
return CreatePageSetFromPath(path, SKIPPED_FILE)
@benchmark.Enabled('content-shell')
class BlinkPerfLayoutFullLayout(BlinkPerfLayout):
tag = 'layout_full_frame'
test = _BlinkPerfFullFrameMeasurement
class BlinkPerfMutation(benchmark.Benchmark):
tag = 'mutation'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Mutation')
return CreatePageSetFromPath(path, SKIPPED_FILE)
class BlinkPerfParser(benchmark.Benchmark):
tag = 'parser'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Parser')
return CreatePageSetFromPath(path, SKIPPED_FILE)
class BlinkPerfSVG(benchmark.Benchmark):
tag = 'svg'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'SVG')
return CreatePageSetFromPath(path, SKIPPED_FILE)
@benchmark.Enabled('content-shell')
class BlinkPerfSVGFullLayout(BlinkPerfSVG):
tag = 'svg_full_frame'
test = _BlinkPerfFullFrameMeasurement
class BlinkPerfShadowDOM(benchmark.Benchmark):
tag = 'shadow_dom'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'ShadowDOM')
return CreatePageSetFromPath(path, SKIPPED_FILE)
# This benchmark is for local testing, doesn't need to run on bots.
@benchmark.Disabled()
class BlinkPerfXMLHttpRequest(benchmark.Benchmark):
tag = 'xml_http_request'
test = _BlinkPerfMeasurement
def CreatePageSet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'XMLHttpRequest')
return CreatePageSetFromPath(path, SKIPPED_FILE)
|
|
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A utility class to send to and recv from a non-blocking socket."""
from __future__ import with_statement
import sys
import zmq
from zmq.utils import jsonapi
try:
import cPickle as pickle
except ImportError:
import pickle
from .ioloop import IOLoop
try:
# gen_log will only import from >= 3.0
from tornado.log import gen_log
from tornado import stack_context
except ImportError:
from .minitornado.log import gen_log
from .minitornado import stack_context
try:
from queue import Queue
except ImportError:
from Queue import Queue
from zmq.utils.strtypes import bytes, unicode, basestring
try:
callable
except NameError:
callable = lambda obj: hasattr(obj, '__call__')
class ZMQStream(object):
"""A utility class to register callbacks when a zmq socket sends and receives
For use with zmq.eventloop.ioloop
There are three main methods
Methods:
* **on_recv(callback, copy=True):**
register a callback to be run every time the socket has something to receive
* **on_send(callback):**
register a callback to be run every time you call send
* **send(self, msg, flags=0, copy=False, callback=None):**
perform a send that will trigger the callback
if callback is passed, on_send is also called.
There are also send_multipart(), send_json(), send_pyobj()
Three other methods for deactivating the callbacks:
* **stop_on_recv():**
turn off the recv callback
* **stop_on_send():**
turn off the send callback
which simply call ``on_<evt>(None)``.
The entire socket interface, excluding direct recv methods, is also
provided, primarily through direct-linking the methods.
e.g.
>>> stream.bind is stream.socket.bind
True
"""
socket = None
io_loop = None
poller = None
def __init__(self, socket, io_loop=None):
self.socket = socket
self.io_loop = io_loop or IOLoop.instance()
self.poller = zmq.Poller()
self._send_queue = Queue()
self._recv_callback = None
self._send_callback = None
self._close_callback = None
self._recv_copy = False
self._flushed = False
self._state = self.io_loop.ERROR
self._init_io_state()
# shortcircuit some socket methods
self.bind = self.socket.bind
self.bind_to_random_port = self.socket.bind_to_random_port
self.connect = self.socket.connect
self.setsockopt = self.socket.setsockopt
self.getsockopt = self.socket.getsockopt
self.setsockopt_string = self.socket.setsockopt_string
self.getsockopt_string = self.socket.getsockopt_string
self.setsockopt_unicode = self.socket.setsockopt_unicode
self.getsockopt_unicode = self.socket.getsockopt_unicode
def stop_on_recv(self):
"""Disable callback and automatic receiving."""
return self.on_recv(None)
def stop_on_send(self):
"""Disable callback on sending."""
return self.on_send(None)
def stop_on_err(self):
"""DEPRECATED, does nothing"""
gen_log.warn("on_err does nothing, and will be removed")
def on_err(self, callback):
"""DEPRECATED, does nothing"""
gen_log.warn("on_err does nothing, and will be removed")
def on_recv(self, callback, copy=True):
"""Register a callback for when a message is ready to recv.
There can be only one callback registered at a time, so each
call to `on_recv` replaces previously registered callbacks.
on_recv(None) disables recv event polling.
Use on_recv_stream(callback) instead, to register a callback that will receive
both this ZMQStream and the message, instead of just the message.
Parameters
----------
callback : callable
callback must take exactly one argument, which will be a
list, as returned by socket.recv_multipart()
if callback is None, recv callbacks are disabled.
copy : bool
copy is passed directly to recv, so if copy is False,
callback will receive Message objects. If copy is True,
then callback will receive bytes/str objects.
Returns : None
"""
self._check_closed()
assert callback is None or callable(callback)
self._recv_callback = stack_context.wrap(callback)
self._recv_copy = copy
if callback is None:
self._drop_io_state(self.io_loop.READ)
else:
self._add_io_state(self.io_loop.READ)
def on_recv_stream(self, callback, copy=True):
"""Same as on_recv, but callback will get this stream as first argument
callback must take exactly two arguments, as it will be called as::
callback(stream, msg)
Useful when a single callback should be used with multiple streams.
"""
if callback is None:
self.stop_on_recv()
else:
self.on_recv(lambda msg: callback(self, msg), copy=copy)
def on_send(self, callback):
"""Register a callback to be called on each send
There will be two arguments::
callback(msg, status)
* `msg` will be the list of sendable objects that was just sent
* `status` will be the return result of socket.send_multipart(msg) -
MessageTracker or None.
Non-copying sends return a MessageTracker object whose
`done` attribute will be True when the send is complete.
This allows users to track when an object is safe to write to
again.
The second argument will always be None if copy=True
on the send.
Use on_send_stream(callback) to register a callback that will be passed
this ZMQStream as the first argument, in addition to the other two.
        on_send(None) disables the send callback.
Parameters
----------
callback : callable
callback must take exactly two arguments, which will be
the message being sent (always a list),
and the return result of socket.send_multipart(msg) -
MessageTracker or None.
if callback is None, send callbacks are disabled.
"""
self._check_closed()
assert callback is None or callable(callback)
self._send_callback = stack_context.wrap(callback)
def on_send_stream(self, callback):
"""Same as on_send, but callback will get this stream as first argument
Callback will be passed three arguments::
callback(stream, msg, status)
Useful when a single callback should be used with multiple streams.
"""
if callback is None:
self.stop_on_send()
else:
self.on_send(lambda msg, status: callback(self, msg, status))
def send(self, msg, flags=0, copy=True, track=False, callback=None):
"""Send a message, optionally also register a new callback for sends.
See zmq.socket.send for details.
"""
return self.send_multipart([msg], flags=flags, copy=copy, track=track, callback=callback)
def send_multipart(self, msg, flags=0, copy=True, track=False, callback=None):
"""Send a multipart message, optionally also register a new callback for sends.
See zmq.socket.send_multipart for details.
"""
kwargs = dict(flags=flags, copy=copy, track=track)
self._send_queue.put((msg, kwargs))
callback = callback or self._send_callback
if callback is not None:
self.on_send(callback)
else:
# noop callback
self.on_send(lambda *args: None)
self._add_io_state(self.io_loop.WRITE)
def send_string(self, u, flags=0, encoding='utf-8', callback=None):
"""Send a unicode message with an encoding.
See zmq.socket.send_unicode for details.
"""
if not isinstance(u, basestring):
raise TypeError("unicode/str objects only")
return self.send(u.encode(encoding), flags=flags, callback=callback)
send_unicode = send_string
def send_json(self, obj, flags=0, callback=None):
"""Send json-serialized version of an object.
See zmq.socket.send_json for details.
"""
if jsonapi is None:
raise ImportError('jsonlib{1,2}, json or simplejson library is required.')
else:
msg = jsonapi.dumps(obj)
return self.send(msg, flags=flags, callback=callback)
def send_pyobj(self, obj, flags=0, protocol=-1, callback=None):
"""Send a Python object as a message using pickle to serialize.
        See zmq.socket.send_pyobj for details.
"""
msg = pickle.dumps(obj, protocol)
return self.send(msg, flags, callback=callback)
def _finish_flush(self):
"""callback for unsetting _flushed flag."""
self._flushed = False
def flush(self, flag=zmq.POLLIN|zmq.POLLOUT, limit=None):
"""Flush pending messages.
This method safely handles all pending incoming and/or outgoing messages,
bypassing the inner loop, passing them to the registered callbacks.
A limit can be specified, to prevent blocking under high load.
flush will return the first time ANY of these conditions are met:
* No more events matching the flag are pending.
* the total number of events handled reaches the limit.
Note that if ``flag|POLLIN != 0``, recv events will be flushed even if no callback
is registered, unlike normal IOLoop operation. This allows flush to be
used to remove *and ignore* incoming messages.
Parameters
----------
flag : int, default=POLLIN|POLLOUT
0MQ poll flags.
If flag|POLLIN, recv events will be flushed.
If flag|POLLOUT, send events will be flushed.
Both flags can be set at once, which is the default.
limit : None or int, optional
The maximum number of messages to send or receive.
Both send and recv count against this limit.
Returns
-------
int : count of events handled (both send and recv)
"""
self._check_closed()
# unset self._flushed, so callbacks will execute, in case flush has
# already been called this iteration
already_flushed = self._flushed
self._flushed = False
# initialize counters
count = 0
def update_flag():
"""Update the poll flag, to prevent registering POLLOUT events
if we don't have pending sends."""
return flag & zmq.POLLIN | (self.sending() and flag & zmq.POLLOUT)
flag = update_flag()
if not flag:
# nothing to do
return 0
self.poller.register(self.socket, flag)
events = self.poller.poll(0)
while events and (not limit or count < limit):
s,event = events[0]
if event & zmq.POLLIN: # receiving
self._handle_recv()
count += 1
if self.socket is None:
# break if socket was closed during callback
break
if event & zmq.POLLOUT and self.sending():
self._handle_send()
count += 1
if self.socket is None:
# break if socket was closed during callback
break
flag = update_flag()
if flag:
self.poller.register(self.socket, flag)
events = self.poller.poll(0)
else:
events = []
if count: # only bypass loop if we actually flushed something
# skip send/recv callbacks this iteration
self._flushed = True
# reregister them at the end of the loop
if not already_flushed: # don't need to do it again
self.io_loop.add_callback(self._finish_flush)
elif already_flushed:
self._flushed = True
# update ioloop poll state, which may have changed
self._rebuild_io_state()
return count
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed."""
self._close_callback = stack_context.wrap(callback)
def close(self, linger=None):
"""Close this stream."""
if self.socket is not None:
self.io_loop.remove_handler(self.socket)
self.socket.close(linger)
self.socket = None
if self._close_callback:
self._run_callback(self._close_callback)
def receiving(self):
"""Returns True if we are currently receiving from the stream."""
return self._recv_callback is not None
def sending(self):
"""Returns True if we are currently sending to the stream."""
return not self._send_queue.empty()
def closed(self):
return self.socket is None
def _run_callback(self, callback, *args, **kwargs):
"""Wrap running callbacks in try/except to allow us to
close our socket."""
try:
# Use a NullContext to ensure that all StackContexts are run
# inside our blanket exception handler rather than outside.
with stack_context.NullContext():
callback(*args, **kwargs)
except:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close()
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
def _handle_events(self, fd, events):
"""This method is the actual handler for IOLoop, that gets called whenever
an event on my socket is posted. It dispatches to _handle_recv, etc."""
# print "handling events"
if not self.socket:
gen_log.warning("Got events for closed stream %s", fd)
return
try:
# dispatch events:
if events & IOLoop.ERROR:
gen_log.error("got POLLERR event on ZMQStream, which doesn't make sense")
return
if events & IOLoop.READ:
self._handle_recv()
if not self.socket:
return
if events & IOLoop.WRITE:
self._handle_send()
if not self.socket:
return
# rebuild the poll state
self._rebuild_io_state()
except:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close()
raise
def _handle_recv(self):
"""Handle a recv event."""
if self._flushed:
return
try:
msg = self.socket.recv_multipart(zmq.NOBLOCK, copy=self._recv_copy)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
# state changed since poll event
pass
else:
gen_log.error("RECV Error: %s"%zmq.strerror(e.errno))
else:
if self._recv_callback:
callback = self._recv_callback
# self._recv_callback = None
self._run_callback(callback, msg)
# self.update_state()
def _handle_send(self):
"""Handle a send event."""
if self._flushed:
return
if not self.sending():
gen_log.error("Shouldn't have handled a send event")
return
msg, kwargs = self._send_queue.get()
try:
status = self.socket.send_multipart(msg, **kwargs)
except zmq.ZMQError as e:
gen_log.error("SEND Error: %s", e)
status = e
if self._send_callback:
callback = self._send_callback
self._run_callback(callback, msg, status)
# self.update_state()
def _check_closed(self):
if not self.socket:
raise IOError("Stream is closed")
def _rebuild_io_state(self):
"""rebuild io state based on self.sending() and receiving()"""
if self.socket is None:
return
state = self.io_loop.ERROR
if self.receiving():
state |= self.io_loop.READ
if self.sending():
state |= self.io_loop.WRITE
if state != self._state:
self._state = state
self._update_handler(state)
def _add_io_state(self, state):
"""Add io_state to poller."""
if not self._state & state:
self._state = self._state | state
self._update_handler(self._state)
def _drop_io_state(self, state):
"""Stop poller from watching an io_state."""
if self._state & state:
self._state = self._state & (~state)
self._update_handler(self._state)
def _update_handler(self, state):
"""Update IOLoop handler with state."""
if self.socket is None:
return
self.io_loop.update_handler(self.socket, state)
def _init_io_state(self):
"""initialize the ioloop event handler"""
with stack_context.NullContext():
self.io_loop.add_handler(self.socket, self._handle_events, self._state)
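# A minimal usage sketch (illustrative only; the PULL socket and endpoint are
# assumptions for the example): wrap a socket in a ZMQStream and log every
# multipart message it receives via the registered on_recv callback.
def _zmqstream_example(endpoint='tcp://127.0.0.1:5555'):
    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.PULL)
    sock.bind(endpoint)
    stream = ZMQStream(sock)
    def log_message(msg):
        gen_log.info("received %r", msg)
    stream.on_recv(log_message)
    return stream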
|
|
# -*- coding: utf-8 -*-
'''
Many aspects of the salt payload need to be managed, from the return of
encrypted keys to general payload dynamics and packaging, these happen
in here
'''
# Import python libs
from __future__ import absolute_import
# import sys # Use if sys is commented out below
import logging
import gc
import datetime
# Import salt libs
import salt.log
import salt.crypt
from salt.exceptions import SaltReqTimeoutError
# Import third party libs
import salt.ext.six as six
try:
import zmq
except ImportError:
# No need for zeromq in local mode
pass
log = logging.getLogger(__name__)
HAS_MSGPACK = False
try:
# Attempt to import msgpack
import msgpack
# There is a serialization issue on ARM and potentially other platforms
# for some msgpack bindings, check for it
if msgpack.loads(msgpack.dumps([1, 2, 3]), use_list=True) is None:
raise ImportError
HAS_MSGPACK = True
except ImportError:
# Fall back to msgpack_pure
try:
import msgpack_pure as msgpack # pylint: disable=import-error
HAS_MSGPACK = True
except ImportError:
# TODO: Come up with a sane way to get a configured logfile
# and write to the logfile when this error is hit also
LOG_FORMAT = '[%(levelname)-8s] %(message)s'
salt.log.setup_console_logger(log_format=LOG_FORMAT)
log.fatal('Unable to import msgpack or msgpack_pure python modules')
# Don't exit if msgpack is not available, this is to make local mode
# work without msgpack
#sys.exit(salt.defaults.exitcodes.EX_GENERIC)
if HAS_MSGPACK and not hasattr(msgpack, 'exceptions'):
class PackValueError(Exception):
'''
older versions of msgpack do not have PackValueError
'''
class exceptions(object):
'''
older versions of msgpack do not have an exceptions module
'''
PackValueError = PackValueError()
msgpack.exceptions = exceptions()
def package(payload):
'''
This method for now just wraps msgpack.dumps, but it is here so that
we can make the serialization a custom option in the future with ease.
'''
return msgpack.dumps(payload)
def unpackage(package_):
'''
Unpackages a payload
'''
return msgpack.loads(package_, use_list=True)
def format_payload(enc, **kwargs):
'''
Pass in the required arguments for a payload, the enc type and the cmd,
then a list of keyword args to generate the body of the load dict.
'''
payload = {'enc': enc}
load = {}
for key in kwargs:
load[key] = kwargs[key]
payload['load'] = load
return package(payload)
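# Illustrative sketch (not part of Salt): round-trip a load through
# format_payload() and unpackage(). The command and target values are
# placeholders; msgpack (or msgpack_pure) imported above must be available.
def _example_payload_roundtrip():
    pkg = format_payload('clear', cmd='test.ping', tgt='*')
    payload = unpackage(pkg)
    # Depending on the msgpack version, keys may come back as bytes or str.
    return payload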
class Serial(object):
'''
Create a serialization object; it manages all message
serialization in Salt.
'''
def __init__(self, opts):
if isinstance(opts, dict):
self.serial = opts.get('serial', 'msgpack')
elif isinstance(opts, str):
self.serial = opts
else:
self.serial = 'msgpack'
def loads(self, msg):
'''
Run the correct loads serialization format
'''
try:
gc.disable() # performance optimization for msgpack
return msgpack.loads(msg, use_list=True)
except Exception as exc:
log.critical('Could not deserialize msgpack message. '
'This often happens when trying to read a file not in binary mode. '
'To see message payload, enable debug logging and retry. Exception: {0}'.format(exc))
log.debug('Msgpack deserialization failure on message: {0}'.format(msg))
raise
finally:
gc.enable()
def load(self, fn_):
'''
Run the correct serialization to load a file
'''
data = fn_.read()
fn_.close()
if data:
return self.loads(data)
def dumps(self, msg):
'''
Run the correct dumps serialization format
'''
try:
return msgpack.dumps(msg)
except (OverflowError, msgpack.exceptions.PackValueError):
# msgpack can't handle the very long Python longs for jids
# Convert any very long longs to strings
# We borrow the technique used by TypeError below
def verylong_encoder(obj):
if isinstance(obj, dict):
for key, value in six.iteritems(obj.copy()):
obj[key] = verylong_encoder(value)
return dict(obj)
elif isinstance(obj, (list, tuple)):
obj = list(obj)
for idx, entry in enumerate(obj):
obj[idx] = verylong_encoder(entry)
return obj
# This is a spurious lint failure as we are gating this check
# behind a check for six.PY2.
if six.PY2 and isinstance(obj, long) and obj > pow(2, 64):  # pylint: disable=incompatible-py3-code
return str(obj)
elif six.PY3 and isinstance(obj, int) and obj > pow(2, 64):
return str(obj)
else:
return obj
return msgpack.dumps(verylong_encoder(msg))
except TypeError as e:
# msgpack doesn't support datetime.datetime datatype
# So here we have converted datetime.datetime to custom datatype
# This is msgpack Extended types numbered 78
def default(obj):
return msgpack.ExtType(78, obj)
def dt_encode(obj):
datetime_str = obj.strftime("%Y%m%dT%H:%M:%S.%f")
return msgpack.packb(datetime_str, default=default)
def datetime_encoder(obj):
if isinstance(obj, dict):
for key, value in six.iteritems(obj.copy()):
obj[key] = datetime_encoder(value)
return dict(obj)
elif isinstance(obj, (list, tuple)):
obj = list(obj)
for idx, entry in enumerate(obj):
obj[idx] = datetime_encoder(entry)
return obj
if isinstance(obj, datetime.datetime):
return dt_encode(obj)
else:
return obj
if "datetime.datetime" in str(e):
return msgpack.dumps(datetime_encoder(msg))
if msgpack.version >= (0, 2, 0):
# Should support OrderedDict serialization, so, let's
# raise the exception
raise
# msgpack is < 0.2.0, let's make its life easier
# Since OrderedDict is identified as a dictionary, we can't
# make use of msgpack custom types, we will need to convert by
# hand.
# This means iterating through all elements of a dictionary or
# list/tuple
def odict_encoder(obj):
if isinstance(obj, dict):
for key, value in six.iteritems(obj.copy()):
obj[key] = odict_encoder(value)
return dict(obj)
elif isinstance(obj, (list, tuple)):
obj = list(obj)
for idx, entry in enumerate(obj):
obj[idx] = odict_encoder(entry)
return obj
return obj
return msgpack.dumps(odict_encoder(msg))
except (SystemError, TypeError) as exc: # pylint: disable=W0705
log.critical('Unable to serialize message! Consider upgrading msgpack. '
'Message which failed was {failed_message} '
'with exception {exception_message}'.format(failed_message=msg, exception_message=exc))
def dump(self, msg, fn_):
'''
Serialize the correct data into the named file object
'''
fn_.write(self.dumps(msg))
fn_.close()
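# Illustrative sketch (not part of Salt): serialize and deserialize a message
# with Serial. The opts dict mirrors the minimal shape Salt passes in.
def _example_serial_roundtrip():
    serial = Serial({'serial': 'msgpack'})
    blob = serial.dumps({'fun': 'test.ping', 'jid': '20170101000000000000'})
    return serial.loads(blob)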
class SREQ(object):
'''
Create a generic interface to wrap salt zeromq req calls.
'''
def __init__(self, master, id_='', serial='msgpack', linger=0, opts=None):
self.master = master
self.id_ = id_
self.serial = Serial(serial)
self.linger = linger
self.context = zmq.Context()
self.poller = zmq.Poller()
self.opts = opts
@property
def socket(self):
'''
Lazily create the socket.
'''
if not hasattr(self, '_socket'):
# create a new one
self._socket = self.context.socket(zmq.REQ)
if hasattr(zmq, 'RECONNECT_IVL_MAX'):
self._socket.setsockopt(
zmq.RECONNECT_IVL_MAX, 5000
)
self._set_tcp_keepalive()
if self.master.startswith('tcp://['):
# Hint PF type if bracket enclosed IPv6 address
if hasattr(zmq, 'IPV6'):
self._socket.setsockopt(zmq.IPV6, 1)
elif hasattr(zmq, 'IPV4ONLY'):
self._socket.setsockopt(zmq.IPV4ONLY, 0)
self._socket.linger = self.linger
if self.id_:
self._socket.setsockopt(zmq.IDENTITY, self.id_)
self._socket.connect(self.master)
return self._socket
def _set_tcp_keepalive(self):
if hasattr(zmq, 'TCP_KEEPALIVE') and self.opts:
if 'tcp_keepalive' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE, self.opts['tcp_keepalive']
)
if 'tcp_keepalive_idle' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_IDLE, self.opts['tcp_keepalive_idle']
)
if 'tcp_keepalive_cnt' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_CNT, self.opts['tcp_keepalive_cnt']
)
if 'tcp_keepalive_intvl' in self.opts:
self._socket.setsockopt(
zmq.TCP_KEEPALIVE_INTVL, self.opts['tcp_keepalive_intvl']
)
def clear_socket(self):
'''
delete socket if you have it
'''
if hasattr(self, '_socket'):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
log.trace('Unregistering socket: {0}'.format(socket))
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
log.trace('Unregistering socket: {0}'.format(socket))
self.poller.unregister(socket[0])
del self._socket
def send(self, enc, load, tries=1, timeout=60):
'''
Takes two arguments, the encryption type and the base payload
'''
payload = {'enc': enc}
payload['load'] = load
pkg = self.serial.dumps(payload)
self.socket.send(pkg)
self.poller.register(self.socket, zmq.POLLIN)
tried = 0
while True:
polled = self.poller.poll(timeout * 1000)
tried += 1
if polled:
break
if tries > 1:
log.info('SaltReqTimeoutError: after {0} seconds. (Try {1} of {2})'.format(
timeout, tried, tries))
if tried >= tries:
self.clear_socket()
raise SaltReqTimeoutError(
'SaltReqTimeoutError: after {0} seconds, ran {1} tries'.format(timeout * tried, tried)
)
return self.serial.loads(self.socket.recv())
def send_auto(self, payload, tries=1, timeout=60):
'''
Detect the encryption type based on the payload
'''
enc = payload.get('enc', 'clear')
load = payload.get('load', {})
return self.send(enc, load, tries, timeout)
def destroy(self):
if isinstance(self.poller.sockets, dict):
sockets = list(self.poller.sockets.keys())
for socket in sockets:
if socket.closed is False:
socket.setsockopt(zmq.LINGER, 1)
socket.close()
self.poller.unregister(socket)
else:
for socket in self.poller.sockets:
if socket[0].closed is False:
socket[0].setsockopt(zmq.LINGER, 1)
socket[0].close()
self.poller.unregister(socket[0])
if self.socket.closed is False:
self.socket.setsockopt(zmq.LINGER, 1)
self.socket.close()
if self.context.closed is False:
self.context.term()
def __del__(self):
self.destroy()
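# Illustrative sketch (not part of Salt): how SREQ is typically driven. The
# master URI is a placeholder and a reachable salt master (plus pyzmq) is
# required for this to succeed.
def _example_sreq_usage(master_uri='tcp://127.0.0.1:4506'):
    sreq = SREQ(master_uri, linger=0)
    try:
        return sreq.send('clear', {'cmd': 'test.ping'}, tries=3, timeout=10)
    finally:
        sreq.destroy()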
|
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Philippe Proulx <eepp.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from lttngc import utils, model, control
from collections import OrderedDict
import xml.etree.ElementTree as ET
import subprocess
import logging
def _bool_from_node_text(text):
text = text.lower()
return text == 'true' or text == '1'
class CommandError(RuntimeError):
@property
def args(self):
return self._args
@args.setter
def args(self, value):
self._args = value
class LttngCliControl(control.Control):
_DOMAIN_TYPE_TO_ENUM = {
'KERNEL': model.Domain.KERNEL,
'UST': model.Domain.USER,
'JUL': model.Domain.JUL,
'LOG4J': model.Domain.LOG4J,
'PYTHON': model.Domain.PYTHON,
}
_CHANNEL_MODE_NAME_TO_ENUM = {
'OVERWRITE': model.ChannelMode.OVERWRITE,
'DISCARD': model.ChannelMode.DISCARD,
}
_CHANNEL_OUTPUT_TYPE_NAME_TO_ENUM = {
'SPLICE': model.ChannelOutputType.SPLICE,
'MMAP': model.ChannelOutputType.MMAP,
}
_CHANNEL_BUFFER_SCHEME_NAME_TO_ENUM = {
'GLOBAL': model.ChannelBufferScheme.GLOBAL,
'PER_UID': model.ChannelBufferScheme.PER_UID,
'PER_PID': model.ChannelBufferScheme.PER_PID,
}
_EVENT_TYPE_NAME_TO_ENUM = {
'ALL': model.EventType.ALL,
'TRACEPOINT': model.EventType.TRACEPOINT,
'PROBE': model.EventType.PROBE,
'FUNCTION': model.EventType.FUNCTION,
'FUNCTION_ENTRY': model.EventType.FUNCTION_ENTRY,
'NOOP': model.EventType.NOOP,
'SYSCALL': model.EventType.SYSCALL,
'KPROBE': model.EventType.KPROBE,
'KRETPROBE': model.EventType.KRETPROBE,
}
_LOG_LEVEL_TYPE_NAME_TO_ENUM = {
'SINGLE': model.LogLevelFunction.EQ,
'RANGE': model.LogLevelFunction.LT_EQ,
'ALL': model.LogLevelFunction.ANY,
}
_DOMAIN_TO_OPTION = {
model.Domain.KERNEL: '-k',
model.Domain.USER: '-u',
model.Domain.JUL: '-j',
model.Domain.LOG4J: '-l',
model.Domain.PYTHON: '-p',
}
_EVENT_TYPE_TO_OPTION = {
model.EventType.TRACEPOINT: '--tracepoint',
model.EventType.SYSCALL: '--syscall',
}
_CHANNEL_MODE_TO_OPTION = {
model.ChannelMode.DISCARD: '--discard',
model.ChannelMode.OVERWRITE: '--overwrite',
}
_CHANNEL_OUTPUT_TYPE_TO_OPTION_TYPE = {
model.ChannelOutputType.SPLICE: 'splice',
model.ChannelOutputType.MMAP: 'mmap',
}
_CHANNEL_BUFFER_SCHEME_TO_OPTION = {
model.ChannelBufferScheme.GLOBAL: '--buffers-global',
model.ChannelBufferScheme.PER_UID: '--buffers-uid',
model.ChannelBufferScheme.PER_PID: '--buffers-pid',
}
_LOG_LEVEL_FUNCTION_TO_OPTION = {
model.LogLevelFunction.LT_EQ: '--loglevel',
model.LogLevelFunction.EQ: '--loglevel-only',
}
def __init__(self, lttng_cli_path='/usr/bin/lttng'):
self._lttng_cli_path = lttng_cli_path
self._logger = logging.getLogger(type(self).__name__)
self._version = self.get_version()
self._last_args = []
self._last_stdout = None
self._last_stderr = None
self._last_return_code = None
self._last_pid = None
@property
def last_args(self):
return self._last_args
@property
def last_stdout(self):
return self._last_stdout
@property
def last_stderr(self):
return self._last_stderr
@property
def last_return_code(self):
return self._last_return_code
@property
def last_pid(self):
return self._last_pid
def _do_command(self, args):
full_args = [self._lttng_cli_path, '--mi', 'xml'] + args
self._last_args = full_args
self._logger.debug('Command: ' + str(full_args))
try:
p = subprocess.Popen(full_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
except:
e = CommandError('Failed to execute command')
e.args = full_args
self._last_stdout = None
self._last_stderr = None
self._last_return_code = None
self._last_pid = None
raise e
self._last_stdout = stdout.decode()
self._last_stderr = stderr.decode()
self._last_return_code = p.returncode
self._last_pid = p.pid
self._logger.debug('Command output:\n' + self._last_stdout)
try:
cmd_node = ET.fromstring(self._last_stdout)
except:
e = CommandError('Failed to parse command output')
e.args = full_args
raise e
return cmd_node
@staticmethod
def _get_output_node(cmd_node):
return cmd_node.find('output')
@staticmethod
def _get_success_node(cmd_node):
return cmd_node.find('success')
@staticmethod
def _is_cmd_success(cmd_node):
success_node = LttngCliControl._get_success_node(cmd_node)
success = _bool_from_node_text(success_node.text)
return success
def get_version(self):
try:
cmd_node = self._do_command(['version'])
except:
raise control.ControlError('Cannot execute version command')
version_node = self._get_output_node(cmd_node).find('version')
string = version_node.find('string').text
major = version_node.find('major').text
minor = version_node.find('minor').text
commit = version_node.find('commit').text
patch_level = version_node.find('patchLevel').text
name = version_node.find('name').text
description = version_node.find('description').text
url = version_node.find('url').text
license = version_node.find('license').text
version = model.Version(string, major, minor, commit, patch_level,
name, description, url, license)
return version
@staticmethod
def _session_summary_from_node(session_node):
name = session_node.find('name').text
output_path = session_node.find('path').text
enabled = session_node.find('enabled').text
enabled = _bool_from_node_text(enabled)
snapshot_mode = session_node.find('snapshot_mode').text
is_snapshot_mode = _bool_from_node_text(snapshot_mode)
live_timer_interval = session_node.find('live_timer_interval').text
live_timer_interval = utils.usec_to_sec(live_timer_interval)
summary = model.SessionSummary(name, output_path, enabled,
is_snapshot_mode, live_timer_interval)
return summary
def get_session_summaries(self):
try:
cmd_node = self._do_command(['list'])
except:
raise control.ControlError('Cannot execute list command')
sessions_node = self._get_output_node(cmd_node).find('sessions')
if sessions_node is None:
raise control.ControlError('Cannot execute list command')
summaries = OrderedDict()
for session_node in sessions_node:
summary = self._session_summary_from_node(session_node)
summaries[summary.name] = summary
return summaries
@staticmethod
def _event_source_from_node(event_node):
name = event_node.find('name').text
type_node = event_node.find('type')
type = LttngCliControl._EVENT_TYPE_NAME_TO_ENUM[type_node.text]
return model.EventSource(name, type)
@staticmethod
def _event_from_node(event_node):
enabled = event_node.find('enabled').text
enabled = _bool_from_node_text(enabled)
filt = event_node.find('filter').text
has_filter = _bool_from_node_text(filt)
exclusions = None
exclusion_node = event_node.find('exclusion')
if exclusion_node is not None:
exclusions = _bool_from_node_text(exclusion_node.text)
log_level = None
log_level_node = event_node.find('loglevel')
if log_level_node is not None:
log_level = log_level_node.text
log_level_func = None
log_level_type_node = event_node.find('loglevel_type')
if log_level_type_node is not None:
log_level_type = log_level_type_node.text
log_level_func = LttngCliControl._LOG_LEVEL_TYPE_NAME_TO_ENUM[log_level_type]
source = LttngCliControl._event_source_from_node(event_node)
event = model.Event(source, enabled, has_filter, exclusions,
log_level_func, log_level)
return event
@staticmethod
def _channel_attributes_from_node(channel_node, bt):
attr = model.ChannelAttributes()
attr_node = channel_node.find('attributes')
mode_node = attr_node.find('overwrite_mode')
attr.mode = LttngCliControl._CHANNEL_MODE_NAME_TO_ENUM[mode_node.text]
attr.subbuf_size = int(attr_node.find('subbuffer_size').text)
attr.subbuf_count = int(attr_node.find('subbuffer_count').text)
switch_timer_interval = attr_node.find('switch_timer_interval').text
attr.switch_timer_interval = utils.usec_to_sec(switch_timer_interval)
read_timer_interval = attr_node.find('read_timer_interval').text
attr.read_timer_interval = utils.usec_to_sec(read_timer_interval)
ot = attr_node.find('output_type').text
output_type = LttngCliControl._CHANNEL_OUTPUT_TYPE_NAME_TO_ENUM[ot]
attr.output_type = output_type
attr.tracefile_size = int(attr_node.find('tracefile_size').text)
attr.tracefile_count = int(attr_node.find('tracefile_count').text)
buffer_scheme = LttngCliControl._CHANNEL_BUFFER_SCHEME_NAME_TO_ENUM[bt]
attr.buffer_scheme = buffer_scheme
return attr
@staticmethod
def _events_from_event_nodes(domain, events_node):
# Assign unique IDs per group of events sharing the exact
# same properties here (except their ID). This is needed
# because such identical objects may exist on the LTTng side,
# so there must be a way to uniquely identify them here.
#
# event_next_eids is a dictionary mapping event tuples (without
# ID) to the next unique ID to use for this group.
event_next_eids = {}
events = []
for event_node in events_node:
event = LttngCliControl._event_from_node(event_node)
event.source.domain = domain
tuple_without_eid = event.get_tuple_without_eid()
if tuple_without_eid in event_next_eids:
# retrieve next ID for this group
next_eid = event_next_eids[tuple_without_eid]
event.eid = next_eid
# update group's next ID
event_next_eids[tuple_without_eid] = next_eid + 1
else:
# initialize next ID for this group
event.eid = 0
event_next_eids[tuple_without_eid] = 1
events.append(event)
return events
@staticmethod
def _channel_from_node(channel_node, domain, buffer_type):
name = channel_node.find('name').text
enabled = channel_node.find('enabled').text
enabled = _bool_from_node_text(enabled)
attr = LttngCliControl._channel_attributes_from_node(channel_node,
buffer_type)
events_node = channel_node.find('events')
events = LttngCliControl._events_from_event_nodes(domain, events_node)
channel = model.Channel(name, domain, enabled, attr, events)
# back reference
for event in channel.events:
event.channel = channel
return channel
def get_session(self, sname):
try:
cmd_node = self._do_command(['list', sname])
except:
raise control.ControlError('Cannot execute list command')
sessions_node = self._get_output_node(cmd_node).find('sessions')
if len(sessions_node) == 0:
raise control.ControlError('No such session "{}"'.format(sname))
session_node = sessions_node[0]
summary = self._session_summary_from_node(session_node)
domains_node = session_node.find('domains')
channels = []
for domain_node in domains_node:
type_node = domain_node.find('type')
buffer_type = domain_node.find('buffer_type')
channels_node = domain_node.find('channels')
domain = self._DOMAIN_TYPE_TO_ENUM[type_node.text]
if channels_node is not None:
for channel_node in channels_node:
channel = self._channel_from_node(channel_node, domain,
buffer_type.text)
channels.append(channel)
else:
channel = model.Channel.create_pseudo(domain)
events_node = domain_node.find('events')
events = self._events_from_event_nodes(domain, events_node)
channel.events = events
# back reference
for event in events:
event.channel = channel
channels.append(channel)
return model.Session(summary, channels)
@staticmethod
def _append_event_sources_from_node(event_sources, events_node, domain):
for event_node in events_node:
event_source = LttngCliControl._event_source_from_node(event_node)
event_source.domain = domain
event_sources.append(event_source)
def get_event_sources(self, domain):
option = self._DOMAIN_TO_OPTION[domain]
try:
cmd_node = self._do_command(['list', option])
except:
raise control.ControlError('Cannot execute list command')
domains_node = self._get_output_node(cmd_node).find('domains')
event_sources = []
if domain == model.Domain.KERNEL:
events_node = domains_node.find('domain').find('events')
# get syscalls also
try:
cmd_node = self._do_command(['list', option, '--syscall'])
except:
raise control.ControlError('Cannot execute list command')
sys_events_node = self._get_output_node(cmd_node).find('events')
for node in sys_events_node:
events_node.append(node)
self._append_event_sources_from_node(event_sources, events_node,
domain)
else:
processes_node = domains_node.find('domain').find('processes')
for process_node in processes_node:
events_node = process_node.find('events')
self._append_event_sources_from_node(event_sources, events_node,
domain)
return event_sources
def _do_simple_success_command(self, name, args=None):
try:
cmd_node = self._do_command([name] + args)
except:
raise control.ControlError('Cannot execute {} command'.format(name))
if not self._is_cmd_success(cmd_node):
raise control.ControlError('{} command failed'.format(name))
def create_session(self, sname, output_path, no_output):
args = []
if sname:
args += [sname]
if no_output:
args.append('--no-output')
elif output_path is not None:
args += ['--output', output_path]
self._do_simple_success_command('create', args)
def destroy_session(self, sname):
self._do_simple_success_command('destroy', [sname])
def enable_event(self, sname, domain, cname, etype, ename,
log_level_function, log_level, efilter):
args = [
self._DOMAIN_TO_OPTION[domain],
'-s', sname,
self._EVENT_TYPE_TO_OPTION[etype],
ename
]
if domain in [model.Domain.KERNEL, model.Domain.USER]:
args += ['-c', cname]
if efilter is not None:
args += ['--filter', efilter]
if log_level_function is not None and log_level is not None:
if log_level_function in self._LOG_LEVEL_FUNCTION_TO_OPTION:
args += [
self._LOG_LEVEL_FUNCTION_TO_OPTION[log_level_function],
log_level,
]
self._do_simple_success_command('enable-event', args)
def disable_event(self, sname, domain, cname, etype, ename):
args = [
self._DOMAIN_TO_OPTION[domain],
'-s', sname,
ename
]
if domain in [model.Domain.KERNEL, model.Domain.USER]:
args += ['-c', cname]
if etype == model.EventType.SYSCALL:
args.append('--syscall')
self._do_simple_success_command('disable-event', args)
def enable_channel(self, sname, domain, cname, attributes):
args = [
self._DOMAIN_TO_OPTION[domain],
'-s', sname,
]
if attributes.mode is not None:
args.append(self._CHANNEL_MODE_TO_OPTION[attributes.mode])
if attributes.subbuf_size is not None:
args += ['--subbuf-size', str(attributes.subbuf_size)]
if attributes.subbuf_count is not None:
args += ['--num-subbuf', str(attributes.subbuf_count)]
if attributes.switch_timer_interval is not None:
args += [
'--switch-timer',
str(utils.sec_to_usec(attributes.switch_timer_interval))
]
if attributes.read_timer_interval is not None:
args += [
'--read-timer',
str(utils.sec_to_usec(attributes.read_timer_interval))
]
if attributes.output_type is not None:
d = self._CHANNEL_OUTPUT_TYPE_TO_OPTION_TYPE
args += ['--output', d[attributes.output_type]]
if attributes.tracefile_size is not None:
args += ['--tracefile-size', str(attributes.tracefile_size)]
if attributes.tracefile_count is not None:
args += ['--tracefile-count', str(attributes.tracefile_count)]
if attributes.buffer_scheme is not None:
bs = attributes.buffer_scheme
args.append(self._CHANNEL_BUFFER_SCHEME_TO_OPTION[bs])
args.append(cname)
self._do_simple_success_command('enable-channel', args)
def disable_channel(self, sname, domain, cname):
args = [
self._DOMAIN_TO_OPTION[domain],
'-s', sname,
cname
]
self._do_simple_success_command('disable-channel', args)
def start_tracing(self, sname):
self._do_simple_success_command('start', [sname])
def stop_tracing(self, sname):
self._do_simple_success_command('stop', [sname])
def add_context(self, sname, domain, cname, types):
args = [
self._DOMAIN_TO_OPTION[domain],
'-s', sname,
'-c', cname,
]
for type in types:
args += ['-t', type]
self._do_simple_success_command('add-context', args)
def save_session(self, path, sname):
args = [
'-f',
'-o', path,
sname,
]
self._do_simple_success_command('save', args)
def load_session(self, path):
args = [
'-f',
'-i', path,
]
self._do_simple_success_command('load', args)
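# Illustrative sketch (not part of lttngc): drive the CLI wrapper above end to
# end. Requires the lttng CLI and a running session daemon; the session,
# channel and event names below are placeholders.
def _example_trace_session(cli_path='/usr/bin/lttng'):
    ctl = LttngCliControl(cli_path)
    ctl.create_session('demo', output_path='/tmp/demo-trace', no_output=False)
    ctl.enable_event('demo', model.Domain.USER, 'channel0',
                     model.EventType.TRACEPOINT, 'my_app:*',
                     None, None, None)
    ctl.start_tracing('demo')
    ctl.stop_tracing('demo')
    ctl.destroy_session('demo')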
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""TFRecord sources and sinks."""
from __future__ import absolute_import
import codecs
import logging
import struct
from builtins import object
from functools import partial
import crcmod
from apache_beam import coders
from apache_beam.io import filebasedsink
from apache_beam.io.filebasedsource import FileBasedSource
from apache_beam.io.filebasedsource import ReadAllFiles
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Write
from apache_beam.transforms import PTransform
__all__ = ['ReadFromTFRecord', 'WriteToTFRecord']
def _default_crc32c_fn(value):
"""Calculates crc32c by either snappy or crcmod based on installation."""
if not _default_crc32c_fn.fn:
try:
import snappy # pylint: disable=import-error
# Support multiple versions of python-snappy:
# https://github.com/andrix/python-snappy/pull/53
if getattr(snappy, '_crc32c', None):
_default_crc32c_fn.fn = snappy._crc32c # pylint: disable=protected-access
else:
_default_crc32c_fn.fn = snappy._snappy._crc32c # pylint: disable=protected-access
except ImportError:
logging.warning('Couldn\'t find python-snappy so the implementation of '
'_TFRecordUtil._masked_crc32c is not as fast as it could '
'be.')
_default_crc32c_fn.fn = crcmod.predefined.mkPredefinedCrcFun('crc-32c')
return _default_crc32c_fn.fn(value)
_default_crc32c_fn.fn = None
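# Quick sanity sketch (assumes crcmod, imported above, is installed): the
# CRC-32C check value for b'123456789' is 0xE3069283, so the helper should
# agree with it before masking is applied.
def _example_crc32c_check():
    assert _default_crc32c_fn(b'123456789') == 0xE3069283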
class _TFRecordUtil(object):
"""Provides basic TFRecord encoding/decoding with consistency checks.
For detailed TFRecord format description see:
https://www.tensorflow.org/versions/r1.11/api_guides/python/python_io#TFRecords_Format_Details
Note that masks and length are represented in LittleEndian order.
"""
@classmethod
def _masked_crc32c(cls, value, crc32c_fn=_default_crc32c_fn):
"""Compute a masked crc32c checksum for a value.
Args:
value: A string for which we compute the crc.
crc32c_fn: A function that can compute a crc32c.
This is a performance hook that also helps with testing. Callers are
not expected to make use of it directly.
Returns:
Masked crc32c checksum.
"""
crc = crc32c_fn(value)
return (((crc >> 15) | (crc << 17)) + 0xa282ead8) & 0xffffffff
@staticmethod
def encoded_num_bytes(record):
"""Return the number of bytes consumed by a record in its encoded form."""
# 16 = 8 (Length) + 4 (crc of length) + 4 (crc of data)
return len(record) + 16
@classmethod
def write_record(cls, file_handle, value):
"""Encode a value as a TFRecord.
Args:
file_handle: The file to write to.
value: A string content of the record.
"""
encoded_length = struct.pack('<Q', len(value))
file_handle.write('{}{}{}{}'.format(
encoded_length,
struct.pack('<I', cls._masked_crc32c(encoded_length)), #
value,
struct.pack('<I', cls._masked_crc32c(value))))
@classmethod
def read_record(cls, file_handle):
"""Read a record from a TFRecords file.
Args:
file_handle: The file to read from.
Returns:
None if EOF is reached; the payload of the record otherwise.
Raises:
ValueError: If file appears to not be a valid TFRecords file.
"""
buf_length_expected = 12
buf = file_handle.read(buf_length_expected)
if not buf:
return None # EOF Reached.
# Validate all length related payloads.
if len(buf) != buf_length_expected:
raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' %
(buf_length_expected, codecs.encode(buf, 'hex')))
length, length_mask_expected = struct.unpack('<QI', buf)
length_mask_actual = cls._masked_crc32c(buf[:8])
if length_mask_actual != length_mask_expected:
raise ValueError('Not a valid TFRecord. Mismatch of length mask: %s' %
codecs.encode(buf, 'hex'))
# Validate all data related payloads.
buf_length_expected = length + 4
buf = file_handle.read(buf_length_expected)
if len(buf) != buf_length_expected:
raise ValueError('Not a valid TFRecord. Fewer than %d bytes: %s' %
(buf_length_expected, codecs.encode(buf, 'hex')))
data, data_mask_expected = struct.unpack('<%dsI' % length, buf)
data_mask_actual = cls._masked_crc32c(data)
if data_mask_actual != data_mask_expected:
raise ValueError('Not a valid TFRecord. Mismatch of data mask: %s' %
codecs.encode(buf, 'hex'))
# All validation checks passed.
return data
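# Illustrative sketch (not part of Beam): frame one record by hand using the
# layout read_record expects (length, masked crc of length, data, masked crc
# of data) and parse it back with _TFRecordUtil.read_record.
def _example_tfrecord_roundtrip(payload=b'hello'):
    import io
    length = struct.pack('<Q', len(payload))
    framed = b''.join([
        length,
        struct.pack('<I', _TFRecordUtil._masked_crc32c(length)),
        payload,
        struct.pack('<I', _TFRecordUtil._masked_crc32c(payload)),
    ])
    assert _TFRecordUtil.read_record(io.BytesIO(framed)) == payload
    return framed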
class _TFRecordSource(FileBasedSource):
"""A File source for reading files of TFRecords.
For detailed TFRecords format description see:
https://www.tensorflow.org/versions/r1.11/api_guides/python/python_io#TFRecords_Format_Details
"""
def __init__(self,
file_pattern,
coder,
compression_type,
validate):
"""Initialize a TFRecordSource. See ReadFromTFRecord for details."""
super(_TFRecordSource, self).__init__(
file_pattern=file_pattern,
compression_type=compression_type,
splittable=False,
validate=validate)
self._coder = coder
def read_records(self, file_name, offset_range_tracker):
if offset_range_tracker.start_position():
raise ValueError('Start position not 0:%s' %
offset_range_tracker.start_position())
current_offset = offset_range_tracker.start_position()
with self.open_file(file_name) as file_handle:
while True:
if not offset_range_tracker.try_claim(current_offset):
raise RuntimeError('Unable to claim position: %s' % current_offset)
record = _TFRecordUtil.read_record(file_handle)
if record is None:
return # Reached EOF
else:
current_offset += _TFRecordUtil.encoded_num_bytes(record)
yield self._coder.decode(record)
def _create_tfrecordio_source(
file_pattern=None, coder=None, compression_type=None):
# We intentionally disable validation for ReadAll pattern so that reading does
# not fail for globs (elements) that are empty.
return _TFRecordSource(file_pattern, coder, compression_type,
validate=False)
class ReadAllFromTFRecord(PTransform):
"""A ``PTransform`` for reading a ``PCollection`` of TFRecord files."""
def __init__(
self,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO,
**kwargs):
"""Initialize the ``ReadAllFromTFRecord`` transform.
Args:
coder: Coder used to decode each record.
compression_type: Used to handle compressed input files. Default value
is CompressionTypes.AUTO, in which case the file_path's extension will
be used to detect the compression.
**kwargs: optional args dictionary. These are passed through to parent
constructor.
"""
super(ReadAllFromTFRecord, self).__init__(**kwargs)
source_from_file = partial(
_create_tfrecordio_source, compression_type=compression_type,
coder=coder)
# Desired and min bundle sizes do not matter since TFRecord files are
# unsplittable.
self._read_all_files = ReadAllFiles(
splittable=False, compression_type=compression_type,
desired_bundle_size=0, min_bundle_size=0,
source_from_file=source_from_file)
def expand(self, pvalue):
return pvalue | 'ReadAllFiles' >> self._read_all_files
class ReadFromTFRecord(PTransform):
"""Transform for reading TFRecord sources."""
def __init__(self,
file_pattern,
coder=coders.BytesCoder(),
compression_type=CompressionTypes.AUTO,
validate=True,
**kwargs):
"""Initialize a ReadFromTFRecord transform.
Args:
file_pattern: A file glob pattern to read TFRecords from.
coder: Coder used to decode each record.
compression_type: Used to handle compressed input files. Default value
is CompressionTypes.AUTO, in which case the file_path's extension will
be used to detect the compression.
validate: Boolean flag to verify that the files exist during the pipeline
creation time.
**kwargs: optional args dictionary. These are passed through to parent
constructor.
Returns:
A ReadFromTFRecord transform object.
"""
super(ReadFromTFRecord, self).__init__(**kwargs)
self._source = _TFRecordSource(file_pattern, coder, compression_type,
validate)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
class _TFRecordSink(filebasedsink.FileBasedSink):
"""Sink for writing TFRecords files.
For detailed TFRecord format description see:
https://www.tensorflow.org/versions/r1.11/api_guides/python/python_io#TFRecords_Format_Details
"""
def __init__(self, file_path_prefix, coder, file_name_suffix, num_shards,
shard_name_template, compression_type):
"""Initialize a TFRecordSink. See WriteToTFRecord for details."""
super(_TFRecordSink, self).__init__(
file_path_prefix=file_path_prefix,
coder=coder,
file_name_suffix=file_name_suffix,
num_shards=num_shards,
shard_name_template=shard_name_template,
mime_type='application/octet-stream',
compression_type=compression_type)
def write_encoded_record(self, file_handle, value):
_TFRecordUtil.write_record(file_handle, value)
class WriteToTFRecord(PTransform):
"""Transform for writing to TFRecord sinks."""
def __init__(self,
file_path_prefix,
coder=coders.BytesCoder(),
file_name_suffix='',
num_shards=0,
shard_name_template=None,
compression_type=CompressionTypes.AUTO,
**kwargs):
"""Initialize WriteToTFRecord transform.
Args:
file_path_prefix: The file path to write to. The files written will begin
with this prefix, followed by a shard identifier (see num_shards), and
end in a common extension, if given by file_name_suffix.
coder: Coder used to encode each record.
file_name_suffix: Suffix for the files written.
num_shards: The number of files (shards) used for output. If not set, the
default value will be used.
shard_name_template: A template string containing placeholders for
the shard number and shard count. When constructing a filename for a
particular shard number, the upper-case letters 'S' and 'N' are
replaced with the 0-padded shard number and shard count respectively.
This argument can be '' in which case it behaves as if num_shards was
set to 1 and only one file will be generated. The default pattern used
is '-SSSSS-of-NNNNN' if None is passed as the shard_name_template.
compression_type: Used to handle compressed output files. Typical value
is CompressionTypes.AUTO, in which case the file_path's extension will
be used to detect the compression.
**kwargs: Optional args dictionary. These are passed through to parent
constructor.
Returns:
A WriteToTFRecord transform object.
"""
super(WriteToTFRecord, self).__init__(**kwargs)
self._sink = _TFRecordSink(file_path_prefix, coder, file_name_suffix,
num_shards, shard_name_template,
compression_type)
def expand(self, pcoll):
return pcoll | Write(self._sink)
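# Illustrative pipeline sketch (not part of this module): read TFRecord files
# and write them back out. The paths are placeholders and a runnable Beam
# environment (e.g. the DirectRunner) is assumed.
def _example_tfrecord_pipeline(input_pattern='/tmp/in-*.tfrecord',
                               output_prefix='/tmp/out'):
    import apache_beam as beam
    with beam.Pipeline() as p:
        records = p | 'Read' >> ReadFromTFRecord(input_pattern)
        _ = records | 'Write' >> WriteToTFRecord(output_prefix)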
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This is a port of the MicroPython upip utility to CPython
# Port Copyright (c) Peter Hinch
# Licensed under the MIT license.
# upip licensing/attribution
# upip - Package manager for MicroPython
#
# Copyright (c) 2015-2018 Paul Sokolovsky
#
# Licensed under the MIT license.
#
# Please note that the author of upip, Paul Sokolovsky, advocates its use
# rather than this port. This is true if using his MicroPython firmware, as
# upip looks in his repo for library modules.
# For users of mainline MicroPython this port ensures that compatible library
# modules are installed.
# Now searches the official library first before looking on PyPi for user
# contributed packages.
import sys
import os
import errno
import json
import zlib
import tarfile
import tempfile
import ssl as ussl
import socket as usocket
debug = False
g_install_path = os.getcwd() # Default install path
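# zlib wbits value for gzip streams: a 15-bit window plus 16 so that
# zlib.decompress() expects a gzip header and trailer.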
gzdict_sz = 16 + 15
def version():
print('Python version 3.2 or above is required.')
sys.exit(1)
if sys.version_info.major < 3:
version()
elif sys.version_info.major == 3 and sys.version_info.minor < 2:
version()
class NotFoundError(Exception):
pass
# Read a line from a socket
def read_line(sock):
ret = b''
while True:
c = sock.recv(1)
if c == b'':
break
elif c == b'\n':
ret += c
break
else:
ret += c
return ret
# Read multiple lines from a socket
def read_lines(sock):
s = b''
while True:
s1 = read_line(sock)
s += s1
if s1 == b'\r\n' or s1 == b'':
break
return s
# Expects absolute path and *file* name
def _makedirs(name):
dirname = os.path.dirname(name)
def split_path(lst, path):
q = os.path.split(path)
if q[1] != '':
lst.append(q[1])
split_path(lst, q[0])
lst = []
split_path(lst, dirname)
lst.reverse()
mypath = os.path.abspath('/')
for elem in lst:
mypath = os.path.join(mypath, elem)
if not os.path.exists(mypath):
try:
os.mkdir(mypath)
except OSError as e:
if e.args[0] != errno.EEXIST and e.args[0] != errno.EISDIR:
raise
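# Illustrative sketch (not part of micropip): _makedirs() takes a *file* path
# and creates every missing parent directory; the file itself is not created.
def _example_makedirs():
    _makedirs('/tmp/micropip-demo/pkg/module.py')  # ensures /tmp/micropip-demo/pkg exists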
def install_tar(f, prefix):
meta = {}
for info in f:
#print(info)
fname = info.name
try:
fname = fname[fname.index("/") + 1:]
except ValueError:
fname = ""
save = True
for p in ("setup.", "PKG-INFO", "README"):
#print(fname, p)
if fname.startswith(p) or ".egg-info" in fname:
if fname.endswith("/requires.txt"):
meta["deps"] = f.extractfile(info).read()
save = False
if debug:
print("Skipping", fname)
break
if save:
outfname = prefix + fname
if info.type != tarfile.DIRTYPE:
if debug:
print("Extracting " + outfname)
_makedirs(outfname)
subf = f.extractfile(info)
with open(outfname, "wb") as outf:
outf.write(subf.read())
return meta
warn_ussl = True
def url_open(url):
global warn_ussl
if debug:
print(url)
proto, _, host, urlpath = url.split('/', 3)
try:
ai = usocket.getaddrinfo(host, 443)
except OSError as e:
print("Unable to resolve %s (no Internet?)" % host)
raise
addr = ai[0][4]
s = usocket.socket(ai[0][0])
try:
if proto == "https:":
s = ussl.wrap_socket(s)
if warn_ussl:
print("Warning: %s SSL certificate is not validated" % host)
warn_ussl = False
s.connect(addr)
s.setblocking(True)
s.send(("GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n" % (urlpath, host)).encode('UTF8'))
l = read_line(s)
protover, status, msg = l.split(None, 2)
if status != b"200":
if status == b"404" or status == b"301":
raise NotFoundError("Package not found")
raise ValueError(status)
while 1:
l = read_line(s)
if not l:
raise ValueError("Unexpected EOF in HTTP headers")
if l == b'\r\n':
break
except Exception as e:
s.close()
raise e
return s
# Now searches official library first before looking on PyPi for user packages
def get_pkg_metadata(name):
try:
f = url_open("https://micropython.org/pi/%s/json" % name)
except:
f = url_open("https://pypi.org/pypi/%s/json" % name)
s = read_lines(f)
try:
return json.loads(s.decode('UTF8'))
finally:
f.close()
def fatal(msg):
print("Error:", msg)
sys.exit(1)
def install_pkg(pkg_spec, install_path):
data = get_pkg_metadata(pkg_spec)
latest_ver = data["info"]["version"]
packages = data["releases"][latest_ver]
assert len(packages) == 1
package_url = packages[0]["url"]
print("Installing %s %s from %s" % (pkg_spec, latest_ver, package_url))
f1 = url_open(package_url)
s = read_lines(f1)
try:
str1 = zlib.decompress(s, gzdict_sz)
with tempfile.TemporaryFile() as temp_file:
temp_file.write(str1)
temp_file.seek(0)
with tarfile.TarFile(fileobj=temp_file) as tar_file: # Expects a file object
meta = install_tar(tar_file, install_path)
finally:
f1.close()
return meta
def install(to_install):
install_path = g_install_path
install_path = os.path.join(install_path, '') # Append final /
if not isinstance(to_install, list):
to_install = [to_install]
print("Installing to: " + install_path)
# sets would be perfect here, but don't depend on them
installed = []
try:
while to_install:
if debug:
print("Queue:", to_install)
pkg_spec = to_install.pop(0)
if pkg_spec in installed:
continue
meta = install_pkg(pkg_spec, install_path)
installed.append(pkg_spec)
if debug:
print(meta)
deps = meta.get("deps", "").rstrip()
if deps:
deps = deps.decode("utf-8").split("\n")
to_install.extend(deps)
except Exception as e:
print("Error installing '{}': {}, packages may be partially installed".format(
pkg_spec, e), file=sys.stderr)
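# Illustrative sketch (not part of micropip): install a package and its
# declared dependencies into a scratch directory. The package name is a
# placeholder and network access to micropython.org/PyPI is required.
def _example_install(target_dir='/tmp/micropip-demo'):
    global g_install_path
    g_install_path = target_dir
    install(['micropython-uasyncio'])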
def help_msg():
print("""\
micropip - Simple PyPI package manager for MicroPython
Runs on a PC under Python 3.2 or above, and installs to a PC directory for
subsequent transfer to target hardware.
Usage: micropip.py install [-p <path>] <package>... | -r <requirements.txt>
The above requires micropip.py to have executable permission.
Alternatively: python3 -m micropip install [-p <path>] <package>... | -r <requirements.txt>
If <path> is not given, packages will be installed into the current directory.
Note: only MicroPython packages (usually, named micropython-*) are supported
for installation, upip does not support arbitrary code in setup.py.
""")
def main():
global debug
global g_install_path
if len(sys.argv) < 2 or sys.argv[1] == "-h" or sys.argv[1] == "--help":
help_msg()
return
if sys.argv[1] != "install":
fatal("Only 'install' command supported")
to_install = []
i = 2
while i < len(sys.argv) and sys.argv[i][0] == "-":
opt = sys.argv[i]
i += 1
if opt == "-h" or opt == "--help":
help_msg()
return
elif opt == "-p":
g_install_path = sys.argv[i]
i += 1
elif opt == "-r":
list_file = sys.argv[i]
i += 1
with open(list_file) as f:
while True:
l = f.readline()
if not l:
break
if l[0] == "#":
continue
to_install.append(l.rstrip())
elif opt == "--debug":
debug = True
else:
fatal("Unknown/unsupported option: " + opt)
to_install.extend(sys.argv[i:])
if not to_install:
help_msg()
return
g_install_path = os.path.expanduser(g_install_path)
g_install_path = os.path.abspath(g_install_path)
install(to_install)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from datetime import datetime as dt
import mock
import pytest
import uuid
import time
from dci import dci_config
from dci.api.v1 import components
from dci.stores import files_utils
from dci.common import exceptions as dci_exc
def test_create_components(admin, topic_id):
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"topic_id": topic_id,
"state": "active",
}
pc = admin.post("/api/v1/components", data=data).data
pc_id = pc["component"]["id"]
gc = admin.get("/api/v1/components/%s" % pc_id).data
assert gc["component"]["name"] == "pname"
assert gc["component"]["state"] == "active"
def test_create_component_lowercase_type(admin, topic_id):
data = {
"name": "pname",
"type": "GERRIT_REVIEW",
"url": "http://example.com/",
"topic_id": topic_id,
"state": "active",
}
component = admin.post("/api/v1/components", data=data).data["component"]
component = admin.get("/api/v1/components/%s" % component["id"]).data["component"]
assert component["type"] == "gerrit_review"
def test_create_components_already_exist(admin, topic_user_id):
data = {"name": "pname", "type": "gerrit_review", "topic_id": topic_user_id}
pstatus_code = admin.post("/api/v1/components", data=data).status_code
assert pstatus_code == 201
pstatus_code = admin.post("/api/v1/components", data=data).status_code
assert pstatus_code == 409
def test_create_components_with_same_name_on_different_topics(admin, topic_id, product):
data = {"name": "pname", "type": "gerrit_review", "topic_id": topic_id}
pstatus_code = admin.post("/api/v1/components", data=data).status_code
assert pstatus_code == 201
topic2 = admin.post(
"/api/v1/topics",
data={
"name": "tname",
"product_id": product["id"],
"component_types": ["type1", "type2"],
},
).data
topic_id2 = topic2["topic"]["id"]
data = {"name": "pname", "type": "gerrit_review", "topic_id": topic_id2}
pstatus_code = admin.post("/api/v1/components", data=data).status_code
assert pstatus_code == 201
def test_create_components_with_same_name_on_same_topics(admin, topic_user_id):
data = {"name": "pname", "type": "gerrit_review", "topic_id": topic_user_id}
pc1 = admin.post("/api/v1/components", data=data)
assert pc1.status_code == 201
pc2 = admin.post("/api/v1/components", data=data)
assert pc2.status_code == 409
def test_create_components_with_same_name_on_same_topics_same_team(
user, topic_user_id, team_user_id
):
data = {
"name": "pname",
"type": "gerrit_review",
"topic_id": topic_user_id,
"team_id": team_user_id,
}
pstatus_code = user.post("/api/v1/components", data=data).status_code
assert pstatus_code == 201
pstatus_code = user.post("/api/v1/components", data=data).status_code
assert pstatus_code == 409
def test_create_components_with_same_name_on_same_topics_different_team(
user, user2, topic_user_id, team_user_id, team_user_id2
):
data = {
"name": "pname",
"type": "gerrit_review",
"topic_id": topic_user_id,
"team_id": team_user_id,
}
pstatus_code = user.post("/api/v1/components", data=data).status_code
assert pstatus_code == 201
data = {
"name": "pname",
"type": "gerrit_review",
"topic_id": topic_user_id,
"team_id": team_user_id2,
}
pstatus_code = user2.post("/api/v1/components", data=data).status_code
assert pstatus_code == 201
def test_recreate_components_with_same_name_on_same_topics(admin, topic_id):
"""The goal of this test is to verify that we can:
- create a component, delete it, then create another component with
the same name as the previous one
- create, then delete, then create, then delete, multiple times a
component with the same name
"""
for n in range(3):
data = {"name": "pouet", "type": "gerrit_review", "topic_id": topic_id}
result = admin.post("/api/v1/components", data=data)
assert result.status_code == 201
result = admin.delete(
"/api/v1/components/%s" % result.data["component"]["id"],
headers={"If-match": result.data["component"]["etag"]},
)
assert result.status_code == 204
def test_create_components_with_same_name_and_different_type(admin, topic_id):
data = {"name": "pname", "type": "first_type", "topic_id": topic_id}
pstatus_code = admin.post("/api/v1/components", data=data).status_code
assert pstatus_code == 201
data = {"name": "pname", "type": "second_type", "topic_id": topic_id}
pstatus_code = admin.post("/api/v1/components", data=data).status_code
assert pstatus_code == 201
def test_create_component_with_tags(admin, topic_id):
data = {
"name": "pname",
"type": "first_type",
"topic_id": topic_id,
"tags": ["tag1", "tag2"],
}
r = admin.post("/api/v1/components", data=data)
assert r.status_code == 201
component = r.data["component"]
r = admin.get("/api/v1/components/%s" % component["id"])
assert r.status_code == 200
assert r.data["component"]["tags"] == ["tag1", "tag2"]
r = admin.put(
"/api/v1/components/%s" % component["id"],
data={"state": "inactive"},
headers={"If-match": component["etag"]},
)
assert r.status_code == 200
assert r.data["component"]["tags"] == ["tag1", "tag2"]
def test_create_component_with_release_at(admin, topic_id):
released_at = dt.utcnow().isoformat()
data = {
"name": "pname",
"type": "first_type",
"topic_id": topic_id,
"released_at": released_at,
}
cmpt = admin.post("/api/v1/components", data=data)
assert cmpt.status_code == 201
cmpt = admin.get("/api/v1/components/%s" % cmpt.data["component"]["id"])
assert cmpt.status_code == 200
assert cmpt.data["component"]["released_at"] == released_at
def test_get_all_components_created_after(admin, topic_id):
created_after = int(time.time() * 1000)
for i in range(5):
admin.post(
"/api/v1/components",
data={
"name": "pname%s" % uuid.uuid4(),
"type": "gerrit_review",
"topic_id": topic_id,
},
).data
db_all_cs = admin.get(
"/api/v1/topics/%s/components?created_after=%s&sort=created_at"
% (topic_id, created_after)
).data
assert len(db_all_cs["components"]) == 5
component_2 = db_all_cs["components"][2]
created_after = int(time.time() * 1000)
db_all_cs = admin.get(
"/api/v1/topics/%s/components?created_after=%s&sort=created_at"
% (topic_id, created_after)
).data
assert len(db_all_cs["components"]) == 0
created_after = component_2["created_at"]
db_all_cs = admin.get(
"/api/v1/topics/%s/components?created_after=%s&sort=created_at"
% (topic_id, created_after)
).data
assert len(db_all_cs["components"]) == 3
def test_get_all_components_updated_after(admin, topic_id):
for i in range(5):
admin.post(
"/api/v1/components",
data={
"name": "pname%s" % uuid.uuid4(),
"type": "gerrit_review",
"topic_id": topic_id,
},
).data
db_all_cs = admin.get(
"/api/v1/topics/%s/components?sort=created_at" % topic_id
).data
assert len(db_all_cs["components"]) == 5
component_2 = db_all_cs["components"][2]
updated_after = dt.utcnow().isoformat()
db_all_cs = admin.get(
"/api/v1/topics/%s/components?updated_after=%s&sort=created_at"
% (topic_id, updated_after)
).data
assert len(db_all_cs["components"]) == 0
admin.put(
"/api/v1/components/%s" % component_2["id"],
headers={"If-match": component_2["etag"]},
data={"name": "lol"},
)
component_2 = admin.get("/api/v1/components/%s" % component_2["id"])
updated_after = component_2.data["component"]["updated_at"]
db_all_cs = admin.get(
"/api/v1/topics/%s/components?updated_after=%s&sort=created_at"
% (topic_id, updated_after)
).data
assert len(db_all_cs["components"]) == 1
def test_get_all_components(admin, topic_id):
created_c_ids = []
for i in range(5):
pc = admin.post(
"/api/v1/components",
data={
"name": "pname%s" % uuid.uuid4(),
"type": "gerrit_review",
"topic_id": topic_id,
},
).data
created_c_ids.append(pc["component"]["id"])
created_c_ids.sort()
db_all_cs = admin.get("/api/v1/topics/%s/components" % topic_id).data
db_all_cs = db_all_cs["components"]
db_all_cs_ids = [db_ct["id"] for db_ct in db_all_cs]
db_all_cs_ids.sort()
assert db_all_cs_ids == created_c_ids
def test_get_all_components_not_in_topic(admin, user, product_openstack):
topic = admin.post(
"/api/v1/topics",
data={
"name": "topic_test",
"product_id": product_openstack["id"],
"component_types": ["type1", "type2"],
},
).data
topic_id = topic["topic"]["id"]
res = user.get("/api/v1/topics/%s/components" % topic_id)
assert res.status_code == 401
assert res.data["message"] == "Operation not authorized."
def test_get_all_components_with_pagination(admin, topic_id):
# create 20 component types and check meta data count
for i in range(20):
admin.post(
"/api/v1/components",
data={
"name": "pname%s" % uuid.uuid4(),
"type": "gerrit_review",
"topic_id": topic_id,
},
)
cs = admin.get("/api/v1/topics/%s/components" % topic_id).data
assert cs["_meta"]["count"] == 20
# verify limit and offset are working well
for i in range(4):
cs = admin.get(
"/api/v1/topics/%s/components?limit=5&offset=%s" % (topic_id, (i * 5))
).data
assert len(cs["components"]) == 5
# if offset is out of bound, the api returns an empty list
cs = admin.get("/api/v1/topics/%s/components?limit=5&offset=300" % topic_id)
assert cs.status_code == 200
assert cs.data["components"] == []
def test_get_all_components_with_where(admin, topic_id):
pc = admin.post(
"/api/v1/components",
data={"name": "pname1", "type": "gerrit_review", "topic_id": topic_id},
).data
pc_id = pc["component"]["id"]
admin.post(
"/api/v1/components",
data={"name": "pname2", "type": "gerrit_review", "topic_id": topic_id},
).data
db_c = admin.get(
"/api/v1/topics/%s/components?where=id:%s" % (topic_id, pc_id)
).data
db_c_id = db_c["components"][0]["id"]
assert db_c_id == pc_id
db_c = admin.get("/api/v1/topics/%s/components?where=name:pname1" % topic_id).data
db_c_id = db_c["components"][0]["id"]
assert db_c_id == pc_id
assert db_c["_meta"]["count"] == 1
def test_where_invalid(admin, topic_id):
err = admin.get("/api/v1/topics/%s/components?where=id" % topic_id)
assert err.status_code == 400
assert err.data["message"] == "Request malformed"
assert err.data["payload"]["error"] == "where: 'id' is not a 'key value csv'"
def test_get_component_by_id_or_name(admin, topic_id):
data = {
"name": "pname",
"type": "gerrit_review",
"topic_id": topic_id,
}
pc = admin.post("/api/v1/components", data=data).data
pc_id = pc["component"]["id"]
# get by uuid
created_ct = admin.get("/api/v1/components/%s" % pc_id)
assert created_ct.status_code == 200
created_ct = created_ct.data
assert created_ct["component"]["id"] == pc_id
def test_get_component_not_found(admin):
result = admin.get("/api/v1/components/ptdr")
assert result.status_code == 404
def test_delete_component_by_id(admin, feeder_context, topic_user_id):
data = {"name": "pname", "type": "gerrit_review", "topic_id": topic_user_id}
pc = feeder_context.post("/api/v1/components", data=data)
pc_id = pc.data["component"]["id"]
assert pc.status_code == 201
created_ct = admin.get("/api/v1/components/%s" % pc_id)
assert created_ct.status_code == 200
deleted_ct = admin.delete(
"/api/v1/components/%s" % pc_id,
headers={"If-match": pc.data["component"]["etag"]},
)
assert deleted_ct.status_code == 204
gct = admin.get("/api/v1/components/%s" % pc_id)
assert gct.status_code == 404
def test_get_all_components_with_sort(admin, topic_id):
# create 4 components ordered by created time
data = {
"name": "pname1",
"title": "aaa",
"type": "gerrit_review",
"topic_id": topic_id,
}
ct_1_1 = admin.post("/api/v1/components", data=data).data["component"]
data = {
"name": "pname2",
"title": "aaa",
"type": "gerrit_review",
"topic_id": topic_id,
}
ct_1_2 = admin.post("/api/v1/components", data=data).data["component"]
data = {
"name": "pname3",
"title": "bbb",
"type": "gerrit_review",
"topic_id": topic_id,
}
ct_2_1 = admin.post("/api/v1/components", data=data).data["component"]
data = {
"name": "pname4",
"title": "bbb",
"type": "gerrit_review",
"topic_id": topic_id,
}
ct_2_2 = admin.post("/api/v1/components", data=data).data["component"]
cts = admin.get("/api/v1/topics/%s/components?sort=created_at" % topic_id).data
cts_id = [db_cts["id"] for db_cts in cts["components"]]
assert cts_id == [ct_1_1["id"], ct_1_2["id"], ct_2_1["id"], ct_2_2["id"]]
# sort by title first and then reverse by created_at
cts = admin.get(
"/api/v1/topics/%s/components?sort=title,-created_at" % topic_id
).data
cts_id = [db_cts["id"] for db_cts in cts["components"]]
assert cts_id == [ct_1_2["id"], ct_1_1["id"], ct_2_2["id"], ct_2_1["id"]]
def test_delete_component_not_found(admin):
result = admin.delete(
"/api/v1/components/%s" % uuid.uuid4(), headers={"If-match": "mdr"}
)
assert result.status_code == 404
def test_put_component(admin, user, topic_id):
data = {
"name": "pname1",
"title": "aaa",
"type": "gerrit_review",
"topic_id": topic_id,
}
ct_1 = admin.post("/api/v1/components", data=data).data["component"]
# Active component
url = "/api/v1/components/%s" % ct_1["id"]
data = {"name": "cname2"}
headers = {"If-match": ct_1["etag"]}
admin.put(url, data=data, headers=headers)
ct_2 = admin.get("/api/v1/components/%s" % ct_1["id"]).data["component"]
assert ct_1["etag"] != ct_2["etag"]
assert ct_2["name"] == "cname2"
def test_update_component_with_tags(admin, topic_id):
data = {
"name": "pname",
"type": "first_type",
"topic_id": topic_id,
"tags": ["tag1", "tag2"],
}
cmpt = admin.post("/api/v1/components", data=data)
assert cmpt.status_code == 201
etag = cmpt.data["component"]["etag"]
data = {"tags": ["hihi", "haha"]}
admin.put(
"/api/v1/components/%s" % cmpt.data["component"]["id"],
data=data,
headers={"If-match": etag},
)
cmpt = admin.get("/api/v1/components/%s" % cmpt.data["component"]["id"])
assert cmpt.data["component"]["tags"] == ["hihi", "haha"]
def test_update_component_lowercase_type(admin, topic_id):
data = {
"name": "pname",
"type": "GERRIT_REVIEW",
"url": "http://example.com/",
"topic_id": topic_id,
"state": "active",
}
component = admin.post("/api/v1/components", data=data).data["component"]
component = admin.put(
"/api/v1/components/%s" % component["id"],
data={"type": "METADATA"},
headers={"If-match": component["etag"]},
).data["component"]
assert component["type"] == "metadata"
def test_add_file_to_component(admin, topic_id):
def create_ct(name):
data = {
"name": name,
"title": "aaa",
"type": "gerrit_review",
"topic_id": topic_id,
}
return admin.post("/api/v1/components", data=data).data["component"]
ct_1 = create_ct("pname1")
ct_2 = create_ct("pname2")
cts = admin.get("/api/v1/components/%s?embed=files" % ct_1["id"]).data
assert len(cts["component"]["files"]) == 0
url = "/api/v1/components/%s/files" % ct_1["id"]
c_file = admin.post(url, data="lol")
c_file_1_id = c_file.data["component_file"]["id"]
url = "/api/v1/components/%s/files" % ct_2["id"]
c_file = admin.post(url, data="lol2")
c_file_2_id = c_file.data["component_file"]["id"]
assert c_file.status_code == 201
l_file = admin.get(url)
assert l_file.status_code == 200
assert l_file.data["_meta"]["count"] == 1
assert l_file.data["component_files"][0]["component_id"] == ct_2["id"]
cts = admin.get("/api/v1/components/%s?embed=files" % ct_1["id"]).data
assert len(cts["component"]["files"]) == 1
assert cts["component"]["files"][0]["size"] == 5
cts = admin.get("/api/v1/components/%s/files" % ct_1["id"]).data
assert cts["component_files"][0]["id"] == c_file_1_id
cts = admin.get("/api/v1/components/%s/files" % ct_2["id"]).data
assert cts["component_files"][0]["id"] == c_file_2_id
def test_download_file_from_component(admin, topic_id):
data = {
"name": "pname1",
"title": "aaa",
"type": "gerrit_review",
"topic_id": topic_id,
}
ct_1 = admin.post("/api/v1/components", data=data).data["component"]
url = "/api/v1/components/%s/files" % ct_1["id"]
data = "lollollel"
c_file = admin.post(url, data=data).data["component_file"]
url = "/api/v1/components/%s/files/%s/content" % (ct_1["id"], c_file["id"])
d_file = admin.get(url)
assert d_file.status_code == 200
assert d_file.data == '"lollollel"'
def test_delete_file_from_component(admin, topic_id):
data = {
"name": "pname1",
"title": "aaa",
"type": "gerrit_review",
"topic_id": topic_id,
}
ct_1 = admin.post("/api/v1/components", data=data).data["component"]
url = "/api/v1/components/%s/files" % ct_1["id"]
data = "lol"
c_file = admin.post(url, data=data).data["component_file"]
url = "/api/v1/components/%s/files" % ct_1["id"]
g_files = admin.get(url)
assert g_files.data["_meta"]["count"] == 1
url = "/api/v1/components/%s/files/%s" % (ct_1["id"], c_file["id"])
d_file = admin.delete(url, headers={"If-match": c_file["etag"]})
assert d_file.status_code == 204
url = "/api/v1/components/%s/files" % ct_1["id"]
g_files = admin.get(url)
assert g_files.data["_meta"]["count"] == 0
def test_change_component_state(admin, topic_id):
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"topic_id": topic_id,
"state": "active",
}
pc = admin.post("/api/v1/components", data=data).data
pc_id = pc["component"]["id"]
t = admin.get("/api/v1/components/" + pc_id).data["component"]
data = {"state": "inactive"}
r = admin.put(
"/api/v1/components/" + pc_id, data=data, headers={"If-match": t["etag"]}
)
assert r.status_code == 200
assert r.data["component"]["state"] == "inactive"
def test_change_component_to_invalid_state(admin, topic_id):
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"topic_id": topic_id,
"state": "active",
}
pc = admin.post("/api/v1/components", data=data).data
pc_id = pc["component"]["id"]
t = admin.get("/api/v1/components/" + pc_id).data["component"]
data = {"state": "kikoolol"}
r = admin.put(
"/api/v1/components/" + pc_id, data=data, headers={"If-match": t["etag"]}
)
assert r.status_code == 400
current_component = admin.get("/api/v1/components/" + pc_id)
assert current_component.status_code == 200
assert current_component.data["component"]["state"] == "active"
def test_component_success_update_field_by_field(admin, topic_id):
data = {"name": "pname", "type": "gerrit_review", "topic_id": topic_id}
c = admin.post("/api/v1/components", data=data).data["component"]
admin.put(
"/api/v1/components/%s" % c["id"],
data={"state": "inactive"},
headers={"If-match": c["etag"]},
)
c = admin.get("/api/v1/components/%s" % c["id"]).data["component"]
assert c["name"] == "pname"
assert c["state"] == "inactive"
assert c["title"] is None
c = admin.put(
"/api/v1/components/%s" % c["id"],
data={"name": "pname2"},
headers={"If-match": c["etag"]},
).data["component"]
assert c["name"] == "pname2"
assert c["state"] == "inactive"
assert c["title"] is None
admin.put(
"/api/v1/components/%s" % c["id"],
data={"title": "a new title"},
headers={"If-match": c["etag"]},
)
c = admin.get("/api/v1/components/%s" % c["id"]).data["component"]
assert c["name"] == "pname2"
assert c["state"] == "inactive"
assert c["title"] == "a new title"
def test_get_component_types_from_topic(admin, engine, topic):
expected_component_types = ["puddle_osp"]
component_types = components.get_component_types_from_topic(
topic["id"], db_conn=engine
)
assert expected_component_types == component_types
def create_component(admin, topic_id, ct, name):
data = {"topic_id": topic_id, "name": name, "type": ct}
component = admin.post("/api/v1/components", data=data).data
return str(component["component"]["id"])
def test_get_last_components_by_type(session, admin, topic):
components_ids = []
for i in range(3):
cid = create_component(admin, topic["id"], "puddle_osp", "name-%s" % i)
components_ids.append(cid)
last_components = components.get_last_components_by_type(
["puddle_osp"], topic_id=topic["id"], session=session
)
assert str(last_components[0].id) == components_ids[-1]
def test_verify_and_get_components_ids(session, admin, topic, topic_user_id):
# components types not valid
with pytest.raises(dci_exc.DCIException):
components.verify_and_get_components_ids(
topic["id"], [], ["puddle_osp"], session=session
)
with pytest.raises(dci_exc.DCIException):
components.verify_and_get_components_ids(
topic["id"],
[str(uuid.uuid4())],
["puddle_osp"],
session=session,
)
# duplicated component types
c1 = create_component(admin, topic_user_id, "type1", "n1")
c2 = create_component(admin, topic_user_id, "type1", "n2")
c3 = create_component(admin, topic_user_id, "type2", "n3")
with pytest.raises(dci_exc.DCIException):
components.verify_and_get_components_ids(
topic_user_id,
[c1, c2, c3],
["type_1", "type_2", "type_3"],
session=session,
)
cids = components.verify_and_get_components_ids(
topic_user_id,
[c1, c3],
["type_1", "type_2"],
session=session,
)
assert set(cids) == {c1, c3}
def test_purge(admin, components_user_ids, topic_user_id):
component_id = components_user_ids[0]
store = dci_config.get_store("components")
url = "/api/v1/components/%s/files" % component_id
c_file1 = admin.post(url, data="lol")
assert c_file1.status_code == 201
path1 = files_utils.build_file_path(
topic_user_id, component_id, c_file1.data["component_file"]["id"]
)
store.get(path1)
url = "/api/v1/components/%s/files" % component_id
c_file2 = admin.post(url, data="lol")
assert c_file2.status_code == 201
path2 = files_utils.build_file_path(
topic_user_id, component_id, c_file2.data["component_file"]["id"]
)
store.get(path2)
component = admin.get("/api/v1/components/%s" % component_id).data["component"]
admin.delete(
"/api/v1/components/%s" % component_id, headers={"If-match": component["etag"]}
)
to_purge = admin.get("/api/v1/components/purge").data
assert len(to_purge["components"]) == 1
c_purged = admin.post("/api/v1/components/purge")
assert c_purged.status_code == 204
with pytest.raises(dci_exc.StoreExceptions):
store.get(path1)
with pytest.raises(dci_exc.StoreExceptions):
store.get(path2)
to_purge = admin.get("/api/v1/components/purge").data
assert len(to_purge["components"]) == 0
def test_purge_failure(admin, components_user_ids, topic_user_id):
component_id = components_user_ids[0]
url = "/api/v1/components/%s/files" % component_id
c_file1 = admin.post(url, data="lol")
assert c_file1.status_code == 201
c_files = admin.get("/api/v1/components/%s/files" % component_id)
assert len(c_files.data["component_files"]) == 1
component = admin.get("/api/v1/components/%s" % component_id).data["component"]
d_component = admin.delete(
"/api/v1/components/%s" % component_id, headers={"If-match": component["etag"]}
)
assert d_component.status_code == 204
to_purge = admin.get("/api/v1/components/purge").data
assert len(to_purge["components"]) == 1
# purge will fail
with mock.patch("dci.stores.filesystem.FileSystem.delete") as mock_delete:
path1 = files_utils.build_file_path(
topic_user_id, component_id, c_file1.data["component_file"]["id"]
)
mock_delete.side_effect = dci_exc.StoreExceptions("error")
purge_res = admin.post("/api/v1/components/purge")
assert purge_res.status_code == 400
store = dci_config.get_store("components")
store.get(path1)
to_purge = admin.get("/api/v1/components/purge").data
assert len(to_purge["components"]) == 1
def test_create_component_as_feeder(admin, topic_id, feeder_context):
data = {"name": "c1", "type": "snapshot", "topic_id": topic_id, "state": "active"}
c = feeder_context.post("/api/v1/components", data=data).data["component"]
component = admin.get("/api/v1/components/%s" % c["id"]).data["component"]
assert component["name"] == "c1"
assert component["state"] == "active"
def test_update_component_as_feeder(admin, topic_id, feeder_context):
data = {"name": "c1", "type": "snapshot", "topic_id": topic_id, "state": "active"}
c = feeder_context.post("/api/v1/components", data=data).data["component"]
feeder_context.put(
"/api/v1/components/%s" % c["id"],
data={"type": "tar"},
headers={"If-match": c["etag"]},
)
component = admin.get("/api/v1/components/%s" % c["id"]).data["component"]
assert component["name"] == "c1"
assert component["type"] == "tar"
def test_create_component_not_allowed_for_user_and_remoteci(
user, remoteci_context, topic_user_id
):
data = {
"name": "c1",
"type": "snapshot",
"topic_id": topic_user_id,
"state": "active",
}
c = user.post("/api/v1/components", data=data)
assert c.status_code == 401
c = remoteci_context.post("/api/v1/components", data=data)
assert c.status_code == 401
# ######### tests teams components
def test_create_teams_components(user, team_user_id, topic_user_id):
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"team_id": team_user_id,
"topic_id": topic_user_id,
"state": "active",
}
pc = user.post("/api/v1/components", data=data).data
pc_id = pc["component"]["id"]
gc = user.get("/api/v1/components/%s" % pc_id).data
assert gc["component"]["name"] == "pname"
assert gc["component"]["state"] == "active"
def test_get_all_teams_components(user, team_user_id, topic_user_id):
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"team_id": team_user_id,
"topic_id": topic_user_id,
"state": "active",
}
pc = user.post("/api/v1/components", data=data).data
pc_id = pc["component"]["id"]
cmpts = user.get(
"/api/v1/topics/%s/components?where=team_id:%s" % (topic_user_id, team_user_id)
).data
assert cmpts["components"][0]["id"] == pc_id
def test_update_teams_components(user, team_user_id, topic_user_id):
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"team_id": team_user_id,
"topic_id": topic_user_id,
"state": "active",
}
pc = user.post("/api/v1/components", data=data).data
pc_id = pc["component"]["id"]
etag = pc["component"]["etag"]
user.put(
"/api/v1/components/%s" % pc_id,
data={"name": "pname2"},
headers={"If-match": etag},
)
gc = user.get("/api/v1/components/%s" % pc_id).data
assert gc["component"]["name"] == "pname2"
def test_delete_teams_components(user, team_user_id, topic_user_id):
data = {
"name": "pname",
"type": "gerrit_review",
"url": "http://example.com/",
"team_id": team_user_id,
"topic_id": topic_user_id,
"state": "active",
}
pc = user.post("/api/v1/components", data=data).data
pc_id = pc["component"]["id"]
gc = user.get("/api/v1/components/%s" % pc_id)
assert gc.status_code == 200
gc = user.delete(
"/api/v1/components/%s" % pc_id, headers={"If-match": pc["component"]["etag"]}
)
assert gc.status_code == 204
gc = user.get("/api/v1/components/%s" % pc_id)
assert gc.status_code == 404
def test_filter_teams_components_by_tag(user, team_user_id, topic_user_id):
data = {
"name": "pname",
"type": "mytest",
"team_id": team_user_id,
"topic_id": topic_user_id,
"tags": ["tag1", "common"],
}
user.post("/api/v1/components", data=data).data
data = {
"name": "pname",
"type": "mylib",
"team_id": team_user_id,
"topic_id": topic_user_id,
"tags": ["tag2", "common"],
}
user.post("/api/v1/components", data=data).data
res = user.get(
"/api/v1/topics/%s/components?where=tags:tag1,team_id:%s"
% (topic_user_id, team_user_id)
)
assert len(res.data["components"]) == 1
assert "tag1" in res.data["components"][0]["tags"]
assert "tag2" not in res.data["components"][0]["tags"]
res = user.get(
"/api/v1/topics/%s/components?where=tags:common,team_id:%s"
% (topic_user_id, team_user_id)
)
assert len(res.data["components"]) == 2
assert "common" in res.data["components"][0]["tags"]
assert "common" in res.data["components"][1]["tags"]
def test_teams_components_isolation(
user, user2, topic_user_id, team_user_id, team_user_id2
):
data = {
"name": "pname",
"type": "mytest",
"topic_id": topic_user_id,
"team_id": team_user_id,
}
pc = user.post("/api/v1/components", data=data)
assert pc.status_code == 201
components = user.get(
"/api/v1/topics/%s/components?where=team_id:%s" % (topic_user_id, team_user_id)
).data
assert components["components"][0]["team_id"] == team_user_id
data = {
"name": "pname",
"type": "mytest",
"topic_id": topic_user_id,
"team_id": team_user_id2,
}
pc = user.post("/api/v1/components", data=data)
assert pc.status_code == 401
pc = user2.post("/api/v1/components", data=data)
assert pc.status_code == 201
components = user2.get(
"/api/v1/topics/%s/components?where=team_id:%s" % (topic_user_id, team_user_id)
)
assert components.status_code == 200
assert components.data["components"] == []
components = user2.get(
"/api/v1/topics/%s/components?where=team_id:%s" % (topic_user_id, team_user_id2)
)
assert components.status_code == 200
assert components.data["components"][0]["team_id"] == team_user_id2
|
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2020, Scripps Research
# All rights reserved.
man_page_text = """
Usage (example):
postprocess_transitions.py ttree_assignments.txt -t file.template \
> new_file.template
Where "file.template" contains contents similar to:
if atoms == [@atom:A,@atom:B,@atom:C] and
bonds == [[1,2], [2,3]] and
bonding_ids == [1,2] and
edge_ids == [3]
then
atoms = [@atom:B,@atom:B,@atom:C] and
bonds = [[2,3], [3,1]]
Eventually, I will also support this syntax:
if atoms @atom:A @atom:B* @atom:C and
bond_type[1,2] == @bond:AB and
bond_type[2,3] == @bond:BC and
angle_type[1,2,3] == @angle:ABC and
distance[1,3] < 3.0 and prob(0.5)
then
atom_type[2] = @atom:B and
atom_type[3] = @atom:D and
delete_bond(1,2) and
bond_type[1,3] = @bond:AD
if atom_type[1] == @atom:A and
atom_type[2] == @atom:B* and
atom_type[3] == @atom:C and
bond_type[1,2] == @bond:AB and
bond_type[2,3] == @bond:BC and
angle_type[1,2,3] == @angle:ABC and
distance[1,3] < 3.0 and prob(0.5)
then
atom_type[2] = @atom:B and
atom_type[3] = @atom:D and
delete_bond(1,2) and
bond_type[1,3] = @bond:AD
if atom_type[1] == @atom:A and
atom_type[2] == @atom:B* and
atom_type[3] == @atom:C and
bond_type[1,2] == @bond:AB and
bond_type[2,3] == @bond:BC and
rmsd((1,2,3), ((1.3,5.2,1.2),(2.4,4.5,6.6),(0.01,1.5,9.55)) <= 3.2
then
delete_bond(2,3)
delete_atom(3)
if atom_type[1] == @atom:A and
atom_type[2] == @atom:B* and
atom_type[3] == @atom:C and
bond_type[1,2] == @bond:AB and
bond_type[2,3] == @bond:BC and
rmsd((1,2,3), ((1.3,5.2,1.2),(2.4,4.5,6.6),(0.01,1.5,9.55)) <= 3.2
then
coords = ((1.3,5.2,1.2),(2.4,4.5,6.6),(0.01,1.5,9.55),(-1.2,0.1,12)) #add atom
and atom_type[4] = @atom:D
and bond_type[3,4] = @bond:CD
"""
# Keywords recognized by this script:
# '[', ']', '(', ')', ',', '==', '=', 'atom_type', 'bond_type', 'angle_type', 'dihedral_type', 'improper_type', 'distance', 'prob', 'rmsd', 'coords', 'delete_atom', 'delete_bond', 'delete_angle', 'delete_dihedral', 'dihedral_improper'
#
# ------ the following features not needed for version 1.0: ------
#
#1) This next step is only needed for rmsd((),()) and coords():
# Create a variant of SplitQuotedString() that splits according to both
# parenthesis and commas and works with nested expressions.
# Call it "SplitNestedQuotes()". It accepts these arguments with these
# default values:
# delim = ','
# open_paren = '(',
# close_paren = ')',
# It will split template lists of this form
# ['bond((1,2),', VarRef('@/bond:AB'), '), ((2,3),', VarRef('@/bond:BC'),'))']
# ... which is what ReadTemplate() will return when invoked on
# 'bond(((1,2),@/bond:AB), ((2,3),@/bond:BC))'
# into something like this:
# KRUFT ALERT. THE NEXT FEW LINES OF COMMENTS ARE OUT OF DATE -AJ2020-11
# ['bond',
# '(',
# ['(1,2),', VarRef('@/bond:AB')],
# ['(2,3),', VarRef('@/bond:BC')],
# ')']
# Note: This function only processes the outermost paren expression.
# The '(1,2),' was not processed further. Had it been, it would have
# returned [['(', ['1','2'] ,')'], '']
#
#2) Use SplitNestedQuotes() to find the arguments following the
# rmsd() and coords() keywords.
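# --------------------------------------------------------------------------
# Illustrative sketch only (not used by this script): a minimal string-based
# version of the "SplitNestedQuotes()" idea described above.  It splits text
# at top-level delimiters while leaving anything inside matching parentheses
# untouched.  The real helper would have to operate on the mixed token/VarRef
# lists produced by ReadTemplate(); this sketch only handles plain strings.
def _split_nested_sketch(text, delim=',', open_paren='(', close_paren=')'):
    pieces = []
    depth = 0
    current = ''
    for ch in text:
        if ch == open_paren:
            depth += 1
        elif ch == close_paren:
            depth -= 1
        if ch == delim and depth == 0:
            pieces.append(current)   # split only at top-level delimiters
            current = ''
        else:
            current += ch
    pieces.append(current)
    return pieces
# Example: _split_nested_sketch('bond((1,2),@/bond:AB), ((2,3),@/bond:BC)')
#          --> ['bond((1,2),@/bond:AB)', ' ((2,3),@/bond:BC)']
# --------------------------------------------------------------------------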
import sys
import argparse
from collections import defaultdict
import re
import gc
try:
from .ttree import ExtractFormattingCommands
from .ttree_lex import *
except (ImportError, SystemError, ValueError):
# not installed as a package
from ttree import ExtractFormattingCommands
from ttree_lex import *
g_filename = __file__.split('/')[-1]
g_module_name = g_filename
if g_filename.rfind('.py') != -1:
g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2020-11-04'
g_version_str = '0.0.3'
g_program_name = g_filename
#sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
def main():
try:
ap = argparse.ArgumentParser(prog=g_program_name)
ap.add_argument('bindings_filename', # (positional argument)
help='assignments file name (usually "ttree_assignments.txt")')
ap.add_argument('-t', '--template', dest='template', required=False,
help='template text file (typically generated by moltemplate, and ending in ".template")')
args = ap.parse_args()
bindings_filename = args.bindings_filename
f = open(bindings_filename)
atom_types = set([])
bond_types = set([])
angle_types = set([])
dihedral_types = set([])
improper_types = set([])
        # Reading the whole assignments file into memory at once would be more
        # robust, but it uses far too much memory.  The for loop below works
        # for most cases.
for line in f:
#tokens = lines.strip().split()
# like split but handles quotes
tokens = SplitQuotedString(line.strip())
if len(tokens) < 2:
continue
if tokens[0].find('@') != 0:
continue
if tokens[0][2:].find('atom') == 0:
atom_types.add(tokens[0][1:])
elif tokens[0][2:].find('bond') == 0:
bond_types.add(tokens[0][1:])
elif tokens[0][2:].find('angle') == 0:
angle_types.add(tokens[0][1:])
elif tokens[0][2:].find('dihedral') == 0:
dihedral_types.add(tokens[0][1:])
elif tokens[0][2:].find('improper') == 0:
improper_types.add(tokens[0][1:])
f.close()
gc.collect()
# Now open the template file containing the list of transition rules.
if args.template is not None:
templ_file = open(args.template, 'r')
lex = LineLex(templ_file)
else:
templ_file = sys.stdin
lex = TtreeShlex(sys.stdin,
'__standard_input_for_postprocess_coeffs__')
lex.commenters = '#' #(don't attempt to skip over comments)
lex.line_extend_chars += '&' #(because LAMMPS interprets '&' as '\')
transition_rules_orig = []
if_clause = []
then_clause = []
in_if_clause = True
while lex:
token = lex.get_token()
if ((token == '') or (token == 'if')):
if ((len(if_clause)>0) and (len(then_clause)>0)):
transition_rules_orig.append([if_clause, then_clause])
if_clause = []
then_clause = []
if token == 'if':
in_if_clause = True
continue
elif token == '':
break
elif token == 'then':
then_clause = []
in_if_clause = False
continue
elif token in ('@', '$'):
var_name = GetVarName(lex)
token = token + var_name # for example: "@/atom:GAFF2/c3"
if in_if_clause:
if_clause.append(token)
else:
then_clause.append(token)
# now close the file (if we opened it)
if args.template is not None:
templ_file.close()
        # Now split the if and then clauses into lists of tokens separated by "and"
        for rule in transition_rules_orig:
            # Reset the per-rule accumulators so requirements from one rule
            # do not leak into the next rule.
            if_requirements = []
            if_requirement = []
            then_results = []
            then_result = []
            if_clause = rule[0]
            then_clause = rule[1]
#split the if_clause into lists of tokens separated by 'and':
for token in if_clause:
if ((token == 'and') and (len(if_requirement) > 0)):
if_requirements.append(if_requirement)
if_requirement = []
else:
if_requirement.append(token)
if len(if_requirement) > 0:
if_requirements.append(if_requirement)
# Replace rule[0] with the if_requirements list
rule[0] = if_requirements
#split the then_clause into lists of tokens separated by 'and'
for token in then_clause:
if ((token == 'and') and (len(then_result) > 0)):
then_results.append(then_result)
then_result = []
else:
then_result.append(token)
if len(then_result) > 0:
then_results.append(then_result)
# Replace rule[1] with the then_results list
rule[1] = then_results
# Now loop through all of the transition rules. For each rule,
# figure out how many times the user specified an atom type, or
# bonded type, or angle type or dihedral type or improper type
# which must be satisfied in order to satisfy the if conditions.
#
# Then, for that rule, generate a separate transition rule for
# every possible combination of atom types, bond types, angle types,
# dihedral types, and improper types which satisfies the requirements
# specified by the user after considering wildcards and regex.
# In this way, a single rule specified by the user (with vague atom
        # type names or vague bonded type names) might get translated
# (expanded) into a large number of individual transition rules
# for use with fix bond/react, where in each rule, each atom type,
# bond type, angle type, etc... is specified explicitly.
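        #
        # Illustrative example (hypothetical type names): if a rule's if-clause
        # requires atom_type[1] to match '@atom:C*' and atom_type[2] to match
        # '@atom:H*', and the assignments file defines atom types C1, C2 and H1,
        # then the wildcards match {C1, C2} and {H1} respectively, so this one
        # user rule expands into 2 x 1 = 2 explicit rules:
        #   (@{atom:C1}, @{atom:H1})  and  (@{atom:C2}, @{atom:H1})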
Natoms = 0 # we will store the number of atoms in the
# pre-reaction template here
transition_rules = [] # we will store processed transition rules here
atom_req = [] # atom type requirements
bond_req = [] # bond type requirements
angle_req = [] # angle type requirements
dihedral_req = [] # dihedral type requirements
improper_req = [] # improper type requirements
for rule in transition_rules_orig:
if_requirements = rule[0]
for if_requirement in if_requirements:
tokens = if_requirement
assert(len(tokens) > 0)
iatm = -1
if tokens[0] == 'atom':
# allow users to use either '=' or '==' to test for equality
# For example, these should both work:
# 'if atom[1] == @atom:A'
# 'if atom[1] = @atom:A'.
# But if '==' was used, the lexer will mistake this for
# two separate tokens ('=' followed by '='). We take care
# of that here by deleting the extra '='
                    if len(tokens) > 5 and tokens[4] == tokens[5] == '=':
                        tokens[4] = '=='
                        del tokens[5]
                    elif len(tokens) > 4 and tokens[4] == '=':
                        tokens[4] = '=='
if not ((len(tokens) == 6) and
(tokens[1] == '[') and
(tokens[2].isnumeric() and
(int(tokens[2]) > 0)) and
(tokens[3] == ']') and
(tokens[4] == '==') and
((tokens[5].find('@') != -1) and
(tokens[5].find('atom:') != -1))):
raise InputError('Error in transitions file near:\n'+
' '+' '.join(tokens)+'\n')
iatm = int(tokens[2])
if iatm >= Natoms:
atom_req += [[] for i in range(0, 1 + iatm - Natoms)]
Natoms = iatm + 1
assert(Natoms == len(atom_req))
typestr = tokens[5][1:] # a string identifying atom type(s)
# eg: '@atom:/SPCE/O' or '@atom:C*'
#icolon = typestr.find('atom:') + 5
#typestr = typestr[icolon:]
# If the second token is surrounded by '/' characters, interpret
# it as a regular expression.
type_is_re = HasRE(typestr)
# If the second token contains wildcard characters, interpret
# it as a wildcard (ie. glob) expression.
type_is_wild = (HasWildcard(typestr) #does it contain *,?
and
(typestr[0] != '{')) #(ignore * or ? in {})
if type_is_re:
regex_str = VarNameToRegex(typestr)
typepattern = re.compile(regex_str)
else:
#left_paren, typepattern, text_after=ExtractVarName(typestr)
typepattern = typestr
left_paren = ''
text_after = ''
for atype in atom_types:
if MatchesPattern(atype, typepattern):
#atom_req[iatm].append('@'+left_paren+atype+text_after)
atom_req[iatm].append('@{'+atype+'}')
# ------------------ CONTINUEHERE --------------------
except (ValueError, InputError) as err:
sys.stderr.write('\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == '__main__':
main()
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1APIServiceList(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1APIService]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1APIServiceList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1APIServiceList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1APIServiceList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1APIServiceList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1APIServiceList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1APIServiceList. # noqa: E501
Items is the list of APIService # noqa: E501
:return: The items of this V1APIServiceList. # noqa: E501
:rtype: list[V1APIService]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1APIServiceList.
Items is the list of APIService # noqa: E501
:param items: The items of this V1APIServiceList. # noqa: E501
:type: list[V1APIService]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1APIServiceList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1APIServiceList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1APIServiceList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1APIServiceList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1APIServiceList. # noqa: E501
:return: The metadata of this V1APIServiceList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1APIServiceList.
:param metadata: The metadata of this V1APIServiceList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1APIServiceList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1APIServiceList):
return True
return self.to_dict() != other.to_dict()
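# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client; the values
# below are hypothetical).  It shows the round trip provided by the generated
# accessors and to_dict().
def _example_v1_api_service_list():
    body = V1APIServiceList(api_version="apiregistration.k8s.io/v1",
                            kind="APIServiceList",
                            items=[])
    # to_dict() recursively serializes nested models into plain dicts,
    # which is also what __eq__ compares.
    return body.to_dict()
# --------------------------------------------------------------------------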
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from EnvironmentListCtrl import EnvironmentListCtrl
from cairis.core.MisuseCaseEnvironmentProperties import MisuseCaseEnvironmentProperties
from MisuseCaseNotebook import MisuseCaseNotebook
__author__ = 'Shamal Faily'
class MisuseCaseEnvironmentPanel(wx.Panel):
def __init__(self,parent,dp):
wx.Panel.__init__(self,parent,MISUSECASE_PANELENVIRONMENT_ID)
self.dbProxy = dp
self.theEnvironmentDictionary = {}
self.theSelectedIdx = -1
self.theSelectedRisk = ''
self.theSelectedThreat = ''
self.theSelectedVulnerability = ''
mainSizer = wx.BoxSizer(wx.HORIZONTAL)
environmentBox = wx.StaticBox(self)
environmentListSizer = wx.StaticBoxSizer(environmentBox,wx.HORIZONTAL)
mainSizer.Add(environmentListSizer,0,wx.EXPAND)
self.environmentList = EnvironmentListCtrl(self,MISUSECASE_LISTENVIRONMENTS_ID,self.dbProxy)
self.environmentList.Unbind(wx.EVT_RIGHT_DOWN)
environmentListSizer.Add(self.environmentList,1,wx.EXPAND)
environmentDimSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(environmentDimSizer,1,wx.EXPAND)
nbBox = wx.StaticBox(self,-1)
nbSizer = wx.StaticBoxSizer(nbBox,wx.VERTICAL)
environmentDimSizer.Add(nbSizer,1,wx.EXPAND)
self.notebook = MisuseCaseNotebook(self)
nbSizer.Add(self.notebook,1,wx.EXPAND)
self.SetSizer(mainSizer)
self.objectiveCtrl = self.notebook.FindWindowById(MISUSECASE_TEXTOBJECTIVE_ID)
self.attackerList = self.notebook.FindWindowById(MISUSECASE_LISTATTACKERS_ID)
self.assetList = self.notebook.FindWindowById(MISUSECASE_LISTASSETS_ID)
self.threatCtrl = self.notebook.FindWindowById(MISUSECASE_TEXTTHREAT_ID)
self.lhoodCtrl = self.notebook.FindWindowById(MISUSECASE_TEXTLIKELIHOOD_ID)
self.vulCtrl = self.notebook.FindWindowById(MISUSECASE_TEXTVULNERABILITY_ID)
self.sevCtrl = self.notebook.FindWindowById(MISUSECASE_TEXTSEVERITY_ID)
self.ratingCtrl = self.notebook.FindWindowById(MISUSECASE_TEXTSCORE_ID)
self.narrativeCtrl = self.notebook.FindWindowById(MISUSECASE_TEXTNARRATIVE_ID)
self.environmentList.Bind(wx.EVT_LIST_INSERT_ITEM,self.OnAddEnvironment)
self.environmentList.Bind(wx.EVT_LIST_DELETE_ITEM,self.OnDeleteEnvironment)
self.narrativeCtrl.Disable()
def unloadMCComponents(self):
self.ratingCtrl.SetValue('')
self.threatCtrl.SetValue('')
self.lhoodCtrl.SetValue('')
self.vulCtrl.SetValue('')
self.sevCtrl.SetValue('')
self.attackerList.DeleteAllItems()
self.assetList.DeleteAllItems()
self.objectiveCtrl.SetValue('')
def loadMCComponents(self):
environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
self.ratingCtrl.SetValue(self.dbProxy.riskRating(self.theSelectedThreat,self.theSelectedVulnerability,environmentName) )
self.threatCtrl.SetValue(self.theSelectedThreat)
threatId = self.dbProxy.getDimensionId(self.theSelectedThreat,'threat')
environmentId = self.dbProxy.getDimensionId(environmentName,'environment')
self.lhoodCtrl.SetValue(self.dbProxy.threatLikelihood(threatId,environmentId))
self.vulCtrl.SetValue(self.theSelectedVulnerability)
vulId = self.dbProxy.getDimensionId(self.theSelectedVulnerability,'vulnerability')
self.sevCtrl.SetValue(self.dbProxy.vulnerabilitySeverity(vulId,environmentId))
self.attackerList.DeleteAllItems()
attackers = self.dbProxy.threatAttackers(threatId,environmentId)
attackerSet = set(attackers)
for atidx,attacker in enumerate(attackerSet):
self.attackerList.InsertStringItem(atidx,attacker)
threatenedAssets = self.dbProxy.threatenedAssets(threatId,environmentId)
vulnerableAssets = self.dbProxy.vulnerableAssets(vulId,environmentId)
objectiveText = 'Exploit vulnerabilities in '
for idx,vulAsset in enumerate(vulnerableAssets):
objectiveText += vulAsset
if (idx != (len(vulnerableAssets) -1)):
objectiveText += ','
objectiveText += ' to threaten '
for idx,thrAsset in enumerate(threatenedAssets):
objectiveText += thrAsset
if (idx != (len(threatenedAssets) -1)):
objectiveText += ','
objectiveText += '.'
self.objectiveCtrl.SetValue(objectiveText)
self.assetList.DeleteAllItems()
assetSet = set(threatenedAssets + vulnerableAssets)
for asidx,asset in enumerate(assetSet):
self.assetList.InsertStringItem(asidx,asset)
def loadMisuseCase(self,mc):
self.theSelectedRisk = mc.risk()
self.theSelectedThreat = mc.threat()
self.theSelectedVulnerability = mc.vulnerability()
self.environmentList.Unbind(wx.EVT_LIST_ITEM_SELECTED)
self.environmentList.Unbind(wx.EVT_LIST_ITEM_DESELECTED)
environmentNames = []
for cp in mc.environmentProperties():
environmentNames.append(cp.name())
self.environmentList.load(environmentNames)
for cp in mc.environmentProperties():
environmentName = cp.name()
self.theEnvironmentDictionary[environmentName] = cp
environmentNames.append(environmentName)
environmentName = environmentNames[0]
p = self.theEnvironmentDictionary[environmentName]
self.narrativeCtrl.SetValue(p.narrative())
self.environmentList.Select(0)
self.loadMCComponents()
self.environmentList.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnEnvironmentSelected)
self.environmentList.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnEnvironmentDeselected)
self.narrativeCtrl.Enable()
self.theSelectedIdx = 0
def loadRiskComponents(self,threatName,vulName):
self.theSelectedThreat = threatName
self.theSelectedVulnerability = vulName
self.environmentList.Unbind(wx.EVT_LIST_INSERT_ITEM)
self.environmentList.Unbind(wx.EVT_LIST_DELETE_ITEM)
self.environmentList.Unbind(wx.EVT_LIST_ITEM_SELECTED)
self.environmentList.Unbind(wx.EVT_LIST_ITEM_DESELECTED)
environments = self.dbProxy.threatVulnerabilityEnvironmentNames(threatName,vulName)
for environmentName in environments:
self.theEnvironmentDictionary[environmentName] = MisuseCaseEnvironmentProperties(environmentName)
self.environmentList.load(environments)
self.environmentList.Select(0)
self.theSelectedIdx = 0
self.loadMCComponents()
self.environmentList.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnEnvironmentSelected)
self.environmentList.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnEnvironmentDeselected)
self.environmentList.Bind(wx.EVT_LIST_INSERT_ITEM,self.OnAddEnvironment)
self.environmentList.Bind(wx.EVT_LIST_DELETE_ITEM,self.OnDeleteEnvironment)
def OnEnvironmentSelected(self,evt):
self.theSelectedIdx = evt.GetIndex()
environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
p = self.theEnvironmentDictionary[environmentName]
self.narrativeCtrl.SetValue(p.narrative())
self.loadMCComponents()
self.narrativeCtrl.Enable()
def OnEnvironmentDeselected(self,evt):
self.theSelectedIdx = evt.GetIndex()
environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
self.theEnvironmentDictionary[environmentName] = MisuseCaseEnvironmentProperties(environmentName,self.narrativeCtrl.GetValue())
self.narrativeCtrl.SetValue('')
self.narrativeCtrl.Disable()
self.unloadMCComponents()
def OnAddEnvironment(self,evt):
self.theSelectedIdx = evt.GetIndex()
environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
self.theEnvironmentDictionary[environmentName] = MisuseCaseEnvironmentProperties(environmentName)
self.environmentList.Select(self.theSelectedIdx)
self.loadMCComponents()
self.narrativeCtrl.Enable()
def OnDeleteEnvironment(self,evt):
selectedIdx = evt.GetIndex()
environmentName = self.environmentList.GetItemText(selectedIdx)
del self.theEnvironmentDictionary[environmentName]
self.theSelectedIdx = -1
self.narrativeCtrl.SetValue('')
self.narrativeCtrl.Disable()
self.unloadMCComponents()
def environmentProperties(self):
if (self.theSelectedIdx != -1):
environmentName = self.environmentList.GetItemText(self.theSelectedIdx)
self.theEnvironmentDictionary[environmentName] = MisuseCaseEnvironmentProperties(environmentName,self.narrativeCtrl.GetValue())
return self.theEnvironmentDictionary.values()
|
|
'''
#------------------------------------------------------------------------------
# TF Dataset object for training & validation using TFGRAND5
#------------------------------------------------------------------------------
# Features:
# Input/Output
# - Input (from GRAND5)
# Dimension : 24x320x320
# Intensity range : 0 to approx. 255 (can exceed this)
# Channels: t1, t2, tc, dwi b1000, tumor mask, water mask
# - Ouptut
#    - Output
# Intensity range : 0 to 2.0 (rescaled by dividing by 255)
# Channels: t1, t2, tc, dwi (adc WIP), 3 channel tumor+water+bg mask
#
# Data Augmentation
# - Series deregistration & overall rotation
# - Spatial flipUD, flipLR, x-y offset, x-y scaling
# - Intensity DC offset, scaling
# - Random series dropout
# - Class over/undersampling (for training set only)
#
#   Hierarchical classification
#    - Automatic small & big tumor class groupings
#    - Adjacency matrix for edges between small & big classes (NClassMat)
#      Linked classes receive a weight of 1, unrelated classes -0.5
#      (an illustrative sketch of such a matrix follows the imports below)
#
# Data Selection
# - Based on Jiahao's TFGRAND5 file path selection script
# - Random seed can be used to generate train/validation split. This
# random seed will also affect the random placement of data onto
# MPI nodes
#    - Alternatively, a separate fixed list (read from a text file) of tfrecord
# paths can be fed as validation set
# - Overlaps between train & validation sets will be removed with
# priority given to validation
#------------------------------------------------------------------------------
'''
# Python2 compatibility
from __future__ import print_function
import numpy as np
import glob
import fnmatch
import random
import sys
import os
import tensorflow as tf
# Do not initialize MPI
import mpi4py.rc
mpi4py.rc.initialize = False
mpi4py.rc.finalize = False
import mpi4py.MPI as MPI
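# --------------------------------------------------------------------------
# Illustrative sketch (not part of the training pipeline; the function name
# and signature are hypothetical).  It shows one way the class-adjacency
# matrix described in the header ("NClassMat": linked classes weighted 1,
# unrelated classes -0.5) could be built from groupings such as
# Data.disease_set, once the grouped class names are mapped to class indices.
def _build_class_adjacency_sketch(groups, n_classes, pos_coeff=1.0,
                                  neg_coeff=-0.5):
    # groups: iterable of tuples of class indices that belong together
    mat = np.full((n_classes, n_classes), neg_coeff, dtype=np.float32)
    np.fill_diagonal(mat, pos_coeff)     # every class is linked to itself
    for group in groups:
        for i in group:
            for j in group:
                mat[i, j] = pos_coeff
    return mat
# --------------------------------------------------------------------------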
# Randomization functions
def randomAngle(deg=40, N=1):
return tf.random_uniform([N], \
minval=-deg/180.*np.pi, maxval=deg/180.*np.pi)
def randomSizes(xy_size):
RAND = tf.random_uniform([2], minval=0.85, maxval=1.1, dtype=tf.float32)
newsize = tf.convert_to_tensor(xy_size, dtype=tf.float32)*RAND
return tf.cast(newsize, tf.int32)
def randomBool():
RAND = tf.random_uniform([1], minval=0, maxval=1, dtype=tf.int32)[0]
return tf.cast(RAND, tf.bool)
def zoomTF(x, image_shape, size, tr_only=False):
with tf.name_scope('zoomTF'):
zsize = tf.cast(size, tf.float32)
h_frac = 1.0*image_shape[1]/zsize[0]
w_frac = 1.0*image_shape[2]/zsize[1]
hd = 0.5*h_frac*(zsize[0] - image_shape[1])
wd = 0.5*w_frac*(zsize[1] - image_shape[2])
zoom_tr = tf.convert_to_tensor([h_frac, 0, hd, 0, w_frac, wd, 0, 0])
zoom_tr = tf.expand_dims(zoom_tr, axis=0)
if tr_only:
out = zoom_tr
else:
out = tf.contrib.image.transform( \
x, zoom_tr, interpolation='BILINEAR')
return out
def rotateTF(x, image_shape, angle_rad, tr_only=False):
# angle_rad can be list of angles
with tf.name_scope('rotateTF'):
rotate_tr = tf.contrib.image.angles_to_projective_transforms(\
angle_rad, image_shape[1], image_shape[2])
if tr_only:
out = rotate_tr
else:
out = tf.contrib.image.transform( \
x, rotate_tr, interpolation='BILINEAR')
return out
def flipLR(x, image_shape, flip, tr_only=False):
with tf.name_scope('randomFlip'):
# vol must be of shape [batch or z, y, x, channels]
flip_tr = tf.convert_to_tensor(
[-1., 0., image_shape[2], 0., 1., 0., 0., 0.], dtype=tf.float32)
flip_id = tf.constant([1, 0, 0, 0, 1, 0, 0, 0], dtype=tf.float32)
if tr_only:
out = tf.cond(flip, lambda: flip_tr, lambda: flip_id)
out = tf.expand_dims(out, axis=0)
else:
out = tf.cond(flip, lambda: tf.contrib.image.transform( \
x, flip_tr, interpolation='BILINEAR'), lambda: x)
return out
def flipUD(x, image_shape, flip, tr_only=False):
with tf.name_scope('randomFlip'):
# vol must be of shape [batch or z, y, x, channels]
flip_tr = tf.convert_to_tensor(
[1., 0., 0., 0., -1., image_shape[1], 0., 0.], dtype=tf.float32)
flip_id = tf.constant([1, 0, 0, 0, 1, 0, 0, 0], dtype=tf.float32)
if tr_only:
out = tf.cond(flip, lambda: flip_tr, lambda: flip_id)
out = tf.expand_dims(out, axis=0)
else:
out = tf.cond(flip, lambda: tf.contrib.image.transform( \
x, flip_tr, interpolation='BILINEAR'), lambda: x)
return out
def randomOffset(xy_size):
RAND = tf.random_uniform([2], minval=-0.1, maxval=0.1, dtype=tf.float32)
offset = tf.convert_to_tensor(xy_size, dtype=tf.float32)*RAND
return tf.cast(offset, tf.int32)
def offsetTF(x, image_shape, xy_offset, tr_only=False):
with tf.name_scope('randomOffset'):
# vol must be of shape [batch or z, y, x, channels]
xy_offset = tf.cast(xy_offset, tf.float32)
offset_tr = tf.convert_to_tensor( \
[1., 0., xy_offset[0], 0., 1., xy_offset[1], 0., 0.], dtype=tf.float32)
if tr_only:
out = tf.expand_dims(offset_tr, axis=0)
else:
out = tf.contrib.image.transform(x, offset_tr, interpolation='BILINEAR')
return out
def CenterBoxTF(data, box):
def shapeDHW(data):
return data.shape[0].value, data.shape[1].value, data.shape[2].value
D,H,W = shapeDHW(data)
d,h,w,c = box
d,h,w = D-d, H-h, W-w
d,h,w = int(d/2), int(h/2), int(w/2)
return d, d+box[0], h, h+box[1], w, w+box[2]
def BoundingBoxTF(data, box, training=True):
with tf.name_scope('BoundingBoxTF'):
# Centers BBox to tumor mask
def shapeDHW(data):
return data.shape[0].value, \
data.shape[1].value, data.shape[2].value
assert isinstance(data, tf.Tensor), "Data is not Tensor"
D,H,W = shapeDHW(data)
#box = tf.constant(box,dtype=tf.int32, shape=[4])
#d,h,w = box[0],box[1],box[2]
d,h,w,c = box
d,h,w,c = d/2., h/2., w/2., c
d1,h1,w1 = tf.floor(d),tf.floor(h),tf.floor(w)
d2,h2,w2 = tf.ceil(d), tf.ceil(h), tf.ceil(w)
d1,h1,w1 = tf.cast(d1, dtype=tf.int32),tf.cast( \
h1, dtype=tf.int32),tf.cast(w1, dtype=tf.int32)
d2,h2,w2 = tf.cast(d2, dtype=tf.int32),tf.cast( \
h2, dtype=tf.int32),tf.cast(w2, dtype=tf.int32)
coord = tf.where(data > 0)
centroid = tf.reduce_mean(coord, 0)
centroid = tf.cast(centroid, tf.int32)
        ## De-center the centroid: small random jitter along the slice axis
        xshift = tf.random_uniform([1],-1, 2, dtype=tf.int32)[0]
        ## and a larger random in-plane jitter
        shift = tf.random_uniform([3],-37,38,dtype=tf.int32)
x,y,z = centroid[0], centroid[1], centroid[2]
if training:
x,y,z = centroid[0] + xshift, \
centroid[1] + shift[1], centroid[2] + shift[2]
minX,minY,minZ = tf.subtract(x,d1),tf.subtract(y,h1),tf.subtract(z,w1)
boundX = tf.maximum(0,-minX)
boundY = tf.maximum(0,-minY)
boundZ = tf.maximum(0,-minZ)
maxX,maxY,maxZ = x + d2 + boundX, y + h2 + boundY, z + w2 + boundZ
minX,minY,minZ = tf.maximum(0,minX), \
tf.maximum(0,minY), tf.maximum(0,minZ)
boundX = tf.maximum(0, maxX-D)
boundY = tf.maximum(0, maxY-H)
boundZ = tf.maximum(0, maxZ-W)
minX,minY,minZ = minX - boundX, minY - boundY, minZ - boundZ
return minX, maxX, minY, maxY, minZ, maxZ, centroid
def TFAugmentation(t1, t2, tc, dwi, mask, wmask, \
image_shape, crop_shape, Training=True):
# Concat series before augmentation to perform operations simultaneously
data_block = tf.concat([t1,t2,tc,dwi,mask,wmask], axis=-1)
N = tf.shape(data_block)[-1]
# Perform Any Augmentation using TF operation
if Training:
# Move 'N' axis to dim 0 --> [N, y, x, z]
data_block = tf.transpose(data_block, [3, 1, 2, 0])
# De-registration
microDeg = 0.01
microAngles = tf.concat([randomAngle(microDeg, N=N-1), \
tf.convert_to_tensor([0.0], dtype=tf.float32)], axis=0)
#data_block = rotateTF(data_block, image_shape, microAngles)
dereg_tr = rotateTF(None, image_shape, microAngles, tr_only=True)
# Random rotations
angle = randomAngle()
#data_block = rotateTF(data_block, image_shape, angle)
rotate_tr = rotateTF(None, image_shape, angle, tr_only=True)
rotate_tr = tf.tile(rotate_tr, [N,1])
# Random displacement
displacement = randomOffset(image_shape[1:3])
offset_tr = offsetTF(None, image_shape, displacement, tr_only=True)
offset_tr = tf.tile(offset_tr, [N,1])
# Random zooms
rescale = randomSizes(image_shape[1:3])
#data_block = zoomTF(data_block, image_shape, rescale)
zoom_tr = zoomTF(None, image_shape, rescale, tr_only=True)
zoom_tr = tf.tile(zoom_tr, [N,1])
# Random flip
flip_lr = flipLR(None, image_shape, randomBool(), tr_only=True)
flip_lr = tf.tile(flip_lr, [N,1])
flip_ud = flipUD(None, image_shape, randomBool(), tr_only=True)
flip_ud = tf.tile(flip_ud, [N,1])
# Perform all transformations
all_tr = [dereg_tr, rotate_tr, offset_tr, zoom_tr, flip_lr, flip_ud]
all_tr = tf.contrib.image.compose_transforms(*all_tr)
data_block = tf.contrib.image.transform( \
data_block, all_tr, interpolation='BILINEAR')
# Swap 'N' axis back to dim 3 --> [z, y, x, N]
data_block = tf.transpose(data_block, [3, 1, 2, 0])
minZ, maxZ, minY, maxY, minX, maxX = \
CenterBoxTF(data_block, box=crop_shape)
data_block = data_block[minZ:maxZ,minY:maxY,minX:maxX,:]
# Un-concat & crop z series back to original channels
t1 = data_block[:,:,:,0:1]
t2 = data_block[:,:,:,1:2]
tc = data_block[:,:,:,2:3]
dwi = data_block[:,:,:,3:4]
mask = data_block[:,:,:,4:5]
wmask = data_block[:,:,:,5:]
return t1 ,t2 ,tc, dwi, mask, wmask
def TFDownsample(t1, t2, tc, dwi, mask, wmask, downsample_shape, image_shape):
# downsample_shape, image_shape args = (batch or z, y, x, channels)
with tf.name_scope('TFDownsample'):
# Concat series before aug to perform operations simultaneously
data_block = tf.concat([t1,t2,tc,dwi,mask,wmask], axis=-1)
# Downsamples x-y only
new_shape = tf.cast(downsample_shape[1:3], tf.float32)
minH = int(0.5*(image_shape[1] - downsample_shape[1]))
minW = int(0.5*(image_shape[2] - downsample_shape[2]))
h_frac = 1.0*image_shape[1]/new_shape[0]
w_frac = 1.0*image_shape[2]/new_shape[1]
hd = 0.5*h_frac*(new_shape[0] - image_shape[1])
wd = 0.5*w_frac*(new_shape[1] - image_shape[2])
zoom_tr = [h_frac, 0, hd, 0, w_frac, wd, 0, 0]
data_block = tf.contrib.image.transform( \
data_block, zoom_tr, interpolation='BILINEAR')
data_block = data_block[:,minH:minH+downsample_shape[1], \
minW:minW+downsample_shape[2],:]
# Un-concat series back to original channels
t1 = data_block[:,:,:,0:1]
t2 = data_block[:,:,:,1:2]
tc = data_block[:,:,:,2:3]
dwi = data_block[:,:,:,3:4]
mask = data_block[:,:,:,4:5]
wmask = data_block[:,:,:,5:]
return t1 ,t2 ,tc, dwi, mask, wmask
def selectedDiseasePath(ROOT, disease_list, anatomy_list, \
with_biopsy, with_mask, sorted=False, from_list=None):
'''
Selects file paths for training based on Jiahao's TFGRAND5 name convention
for tumor disease type, biopsy & mask availability, and anatomies.
Inputs: ROOT - folder containing all TFGRAND5 records. Will be globbed for
list of files
disease_list - list of diseases to be gathered e.g. ['nml', 'jzl'].
If set to None, all diseases are selected
anatomy_list - list of anatomies to be selected e.g. ['123']. If
set to None, all anatomies are selected
with_biopsy - if T, select only files with biopsy ground truth
with_mask - if T, select only files with masks
sorted - if T, sorts globbed files (essential if fixed
randomization seed is used later on for train/valid split)
from_list - if not None, filenames from this list will be used for
selection based on disease type, biopsy & mask
availability, and anatomies instead of glob from ROOT.
If this is used, ROOT can be set to None
Outputs: list of file paths (string) that meet selection criteria
'''
    # Glob ROOT only once (skip the glob entirely when ROOT is None and
    # selection is done from an explicit from_list)
    if ROOT is not None and not hasattr(selectedDiseasePath, "globbed_paths"):
selectedDiseasePath.globbed_paths = []
if MPI.COMM_WORLD.Get_rank() == 0:
print("Globbing files...", end='')
sys.stdout.flush()
selectedDiseasePath.globbed_paths = glob.glob(ROOT + '/*')
print("Done")
selectedDiseasePath.globbed_paths = \
MPI.COMM_WORLD.bcast(selectedDiseasePath.globbed_paths, root=0)
disease_paths = []
# Whether to select from a predefined list or from glob of ROOT
if from_list is not None:
paths = from_list
else:
paths = selectedDiseasePath.globbed_paths
if len(disease_list) == 0 and len(anatomy_list) == 0:
# select all diseases
disease_paths = paths
elif len(disease_list) == 0 and len(anatomy_list) > 0:
for anatomy in anatomy_list:
disease_paths += [f for f in paths \
if all(a in os.path.basename(f).split('_')[1] \
for a in list(anatomy))]
elif len(disease_list) > 0 and len(anatomy_list) == 0:
for disease in disease_list:
disease_paths += [f for f in paths \
if fnmatch.fnmatch(os.path.basename(f), disease + '*')]
elif len(disease_list) > 0 and len(anatomy_list) > 0:
for disease in disease_list:
for anatomy in anatomy_list:
fset = [f for f in paths \
if fnmatch.fnmatch(os.path.basename(f), disease + '*')]
disease_paths += [f for f in fset \
if all(a in os.path.basename(f).split('_')[1] \
for a in list(anatomy))]
# Remove duplicates
disease_paths = list(set(disease_paths))
if with_biopsy:
disease_paths = [p for p in disease_paths \
if os.path.basename(p).split('_')[2] == '1']
if with_mask:
disease_paths = [p for p in disease_paths \
if any(i == '1' for i in os.path.basename(p).split('_')[3:7])]
# Sort gathered file paths
if sorted:
disease_paths.sort()
return disease_paths
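# Illustrative usage (hypothetical path and arguments; nothing in this module
# calls it): gather 'nml' records covering anatomy '123' that have both a
# biopsy ground truth and a mask, sorted so that a fixed random seed later
# yields a reproducible train/validation split.
def _selectedDiseasePath_example():
    return selectedDiseasePath('/data/TFGRAND5', ['nml'], ['123'],
                               with_biopsy=True, with_mask=True, sorted=True)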
class Data:
# train_list, valid_list, num_reps_train_golden, ...
# ... num_reps_train_extra, with_biopsy_golden, with_mask_golden
TFLibrary = { \
'others' : [[], [], 5 , 2 , True, True], \
'bpy' : [[], [], 1 , 1 , True, True], \
'tsjl' : [[], [], 1 , 1 , True, True], \
'nml' : [[], [], 1 , 0.5, True, True], \
'jzl' : [[], [], 1 , 0 , True, True], \
'xxxbl' : [[], [], 1 , 0.3, True, True], \
'ctl' : [[], [], 1 , 0.2, True, True], \
'jsl' : [[], [], 1 , 1 , True, True], \
'smxbl' : [[], [], 1 , 1 , True, True], \
'lygl' : [[], [], 1 , 0.5, True, True], \
'sgml' : [[], [], 1 , 1 , True, True], \
'jzmxbl' : [[], [], 1 , 1 , True, True], \
'xgmxbl' : [[], [], 1 , 1 , True, True], \
'xgwpxbl' : [[], [], 2 , 2 , True, True], \
'szxbl' : [[], [], 3 , 1 , True, True], \
'lbl' : [[], [], 3 , 1 , True, True], \
'jxbl' : [[], [], 1 , 1 , True, True], \
'pynz' : [[], [], 4 , 1 , True, True], \
'mlcrt' : [[], [], 3 , 1 , True, True], \
'nnz' : [[], [], 2 , 2 , True, True], \
'jjmql' : [[], [], 5 , 5 , True, True], \
'zyl' : [[], [], 5 , 5 , True, True], \
'jtl' : [[], [], 2 , 2 , True, True], \
'cyxnz' : [[], [], 6 , 3 , True, True], \
'klxbl' : [[], [], 6 , 3 , True, True], \
'nnc' : [[], [], 1 , 1 , True, True], \
'DNET' : [[], [], 3 , 3 , True, True], \
'sjql' : [[], [], 1 , 1 , True, True], \
'hssl' : [[], [], 6 , 6 , True, True]}
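    # Example reading of one entry above (illustrative): 'nml' maps to
    # [[], [], 1, 0.5, True, True], meaning the "golden" nml cases (those with
    # both biopsy and mask, per the last two flags) are used once each for
    # training, half (0.5) of the remaining nml cases are added back as extra
    # training data, and the two empty lists are filled in later with the
    # train and validation paths.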
# key in TFLibrary, orig TFRecord index, modified index
DISEASE_MAPPER = { \
'others': ('others', 0, 0, ''), \
'biao_pi_yang': ('bpy', 1, 1, ''), \
'ting_shen_jing_liu': ('tsjl', 2, 2, ''), \
'nao_mo_liu': ('nml', 3, 3, ''), \
'jiao_zhi_liu': ('jzl', 4, 4, ''), \
'xing_xing_xi_bao_liu': ('xxxbl', 5, 5, ''), \
'chui_ti_liu': ('ctl', 6, 6, ''), \
'ji_suo_liu': ('jsl', 7, 7, ''), \
'sui_mu_xi_bao_liu': ('smxbl', 8, 8, ''), \
'lu_yan_guan_liu': ('lygl', 9, 9, ''), \
'shi_guan_mo_liu': ('sgml', 10, 10, ''), \
'jiao_zhi_mu_xi_bao_liu': ('jzmxbl', 11, 11, ''), \
'xue_guan_mu_xi_bao_liu': ('xgmxbl', 12, 12, ''), \
'xue_guan_wai_pi_xi_bao_liu': ('xgwpxbl', 13, 13, ''), \
'sheng_zhi_xi_bao_liu': ('szxbl', 14, 14, ''), \
'lin_ba_liu': ('lbl', 15, 15, ''), \
'jie_xi_bao_liu': ('jxbl', 16, 16, ''), \
'pi_yang_nang_zhong': ('pynz', 17, 17, ''), \
'mai_luo_cong_ru_tou_zhuang_liu': ('mlcrt', 18, 18, ''), \
'nao_nong_zhong': ('nnz', 19, 0, 'others'), \
'jing_jing_mai_qiu_liu': ('jjmql', 20, 20, ''), \
'zhuan_yi_liu': ('zyl', 21, 21, ''), \
'ji_tai_liu': ('jtl', 23, 23, ''), \
'chang_yuan_xing': ('cyxnz', 24, 0, 'others'), \
'ke_li_xi_bao': ('klxbl', 25, 0, 'others'), \
'nao_nang_cong': ('nnc', 26, 0, 'others'), \
'DNET': ('DNET', 27, 27, ''), \
'shen_jing_qiao_liu': ('sjql', 28, 28, ''), \
'hei_se_su_liu': ('hssl', 29, 0, 'others'), \
}
TFLibrary_REMAP = None
DISEASE_REMAPPER = None
TF_TRAIN_RECORD_PATH = None
TOTAL_TRAIN_DATA = None
TF_VALID_RECORD_PATH = None
TOTAL_VALID_DATA = None
image_size = None
crop_size = None
valid_batch_size = None
# Disease grouping
disease_set = [ \
('jzl','jzmxbl','sgml','xxxbl'), \
('szxbl','jtl'), \
('tsjl','sjql'), \
]
def __init__(self, root_dir=None, anatomies=[], biopsy_only=False, mask_only=False, \
series_kprob=(1,1,1,1), train_valid_seed=1234, valid_list=None, \
clsmatcoeff=(0,1.0), water_mask=False, testing=False):
self.testing = testing
self.ROOT = root_dir
assert os.path.exists(self.ROOT), "ERROR: Data root dir " + self.ROOT + " does not exist"
if self.testing:
for k in self.TFLibrary.keys():
self.TFLibrary[k][2], self.TFLibrary[k][3] = (1,1)
if MPI.COMM_WORLD.Get_rank() == 0:
print("DATASET SCRIPT 2C-WD", end="")
if self.testing:
print(" TEST MODE")
else:
print("")
if valid_list is not None:
print("NOTE: Reading validation list from " + valid_list)
mapper = {}
for ikey, iattribute in self.DISEASE_MAPPER.items():
if iattribute[3] == '' or iattribute[3] is None:
ivalue = iattribute[:3] + (iattribute[0],)
else:
ivalue = iattribute
mapper.update({ikey:ivalue})
self.DISEASE_MAPPER = mapper
assert len(series_kprob) >= 4, "ERROR: len(series_kprob) < 4"
if len(series_kprob) > 4:
            if MPI.COMM_WORLD.Get_rank() == 0:
print("WARNING: Truncating series_kprob to len 4")
self.series_kprob = np.asarray(series_kprob[:4], dtype=np.float32)
# cls mat coeff
self.neg_matcoeff = clsmatcoeff[0]
self.pos_matcoeff = clsmatcoeff[1]
self.water_mask = water_mask
# set of unique modified indices
unq_mod = set(sorted(k[2] for k in self.DISEASE_MAPPER.values()))
# set of unique modified keys
unq_modkey = set([k[3] for k in self.DISEASE_MAPPER.values()])
# make sure bijective modkey:mod
assert len(unq_mod) == len(unq_modkey), \
"ERROR: mod:modkey not bijective"
# Remap to 0-n_class
        # dict to map modified indices to sequential indices
mod_to_remap = {mod:remap for remap, mod in enumerate(unq_mod)}
# dict to map orig index to sequential indices
orig_to_remap = {orig:mod_to_remap[mod] \
for key, orig, mod, modkey in self.DISEASE_MAPPER.values()}
# convert orig_to_remap dict to indexed list for TF
remap = max([k for k,v in orig_to_remap.items()])
self.remap = np.zeros(remap+1, dtype=np.int32)
for key, value in orig_to_remap.items():
self.remap[key] = value
# Create threadsafe Random object & initialize seed
self.random = random.Random(train_valid_seed)
# Init training set by selecting all diseases present in TFlibrary
# 'golden' set (biopsy & mask present) is used for validation
for idisease in self.TFLibrary.keys():
disease_train = []
disease_valid = []
# Get config, bounds & count attributes for this disease
biopsy_flag = self.TFLibrary[idisease][4]
mask_flag = self.TFLibrary[idisease][5]
disease_paths = selectedDiseasePath( \
self.ROOT, [idisease], anatomies, \
with_biopsy=biopsy_flag, with_mask=mask_flag, sorted=True)
n_data = len(disease_paths)
n_train_reps = self.TFLibrary[idisease][2]
if n_data < 2:
"WARNING: n_data < 2 for " + idisease
n_valid = 0
else:
if valid_list is None:
# Compute approx. 20-40 validation cases
if 20.0/n_data < (1.0/5):
n_valid = 20 + (n_data-1)%20 + 1
elif n_data > 20:
n_valid = (n_data-1)%20 + 1
else:
n_valid = max(1, int(1.0*n_data/3))
# Split data into train & val, oversample based on config
assert n_valid < n_data, \
"ERROR: n_valid >= # Data for " + idisease
else:
n_valid = 0
if MPI.COMM_WORLD.Get_rank() == 0:
self.random.shuffle(disease_paths)
disease_paths = MPI.COMM_WORLD.bcast(disease_paths, root=0)
if n_train_reps >= 1:
disease_train = \
disease_paths[0:n_data - n_valid]*n_train_reps
elif n_train_reps > 0:
disease_train = disease_paths[0:n_data - n_valid]
n_trunc = int(n_train_reps*len(disease_train))
disease_train = disease_train[0:n_trunc]
else:
disease_train = []
if n_valid > 0:
disease_valid = disease_paths[n_data - n_valid:]
else:
disease_valid = []
self.TFLibrary[idisease][0] += disease_train
self.TFLibrary[idisease][1] += disease_valid
# Add back non-biopsy & non-mask as training data
for idisease in self.TFLibrary.keys():
disease_train = []
# Get config, bounds & count attributes for this disease
disease_paths = selectedDiseasePath( \
self.ROOT, [idisease], anatomies, \
with_biopsy=biopsy_only, with_mask=mask_only, sorted=True)
# Add only samples that have not been added previously
# Remove overlaps with original training set
disease_paths = [k for k in disease_paths \
if k not in set(self.TFLibrary[idisease][0])]
# Remove overlap with validation set
disease_paths = [k for k in disease_paths \
if k not in set(self.TFLibrary[idisease][1])]
n_data = len(disease_paths)
# Replicate n times
n_train_reps = self.TFLibrary[idisease][3]
if n_train_reps >= 1:
disease_train = disease_paths*n_train_reps
elif n_train_reps > 0:
n_trunc = int(n_train_reps*len(disease_paths))
disease_train = disease_paths[0:n_trunc]
else:
disease_train = []
#print "ADD ", idisease, n_data, len(self.TFLibrary[idisease][0])
# Append to original training list
self.TFLibrary[idisease][0] += disease_train
# If we are to read validation list from a file
if valid_list is not None:
file_list = None
if MPI.COMM_WORLD.Get_rank() == 0:
fopen = open(valid_list, 'r')
file_list = fopen.read().splitlines()
fopen.close()
for idisease in self.TFLibrary.keys():
disease_paths = selectedDiseasePath( \
self.ROOT, [idisease], anatomies, \
with_biopsy=biopsy_flag, with_mask=mask_flag, \
sorted=True, from_list=file_list)
self.TFLibrary[idisease][1] = disease_paths
for idisease in self.TFLibrary.keys():
self.TFLibrary[idisease][1] = \
MPI.COMM_WORLD.bcast(self.TFLibrary[idisease][1], root=0)
# Make sure no overlap with validation
for idisease in self.TFLibrary.keys():
valid_basename = [os.path.basename(k) \
for k in self.TFLibrary[idisease][1]]
self.TFLibrary[idisease][0] = \
[k for k in self.TFLibrary[idisease][0] \
if os.path.basename(k) not in valid_basename]
# Create grand list for training
self.TF_TRAIN_RECORD_PATH = []
self.TF_VALID_RECORD_PATH = []
for idisease, iattribute in self.TFLibrary.items():
self.TF_TRAIN_RECORD_PATH += iattribute[0]
self.TF_VALID_RECORD_PATH += iattribute[1]
self.TOTAL_TRAIN_DATA = len(self.TF_TRAIN_RECORD_PATH)
self.TOTAL_VALID_DATA = len(self.TF_VALID_RECORD_PATH)
# Check validity of file paths
for i in self.TF_TRAIN_RECORD_PATH:
assert os.path.exists(i), 'no such file {}'.format(i)
for i in self.TF_VALID_RECORD_PATH:
assert os.path.exists(i), 'no such file {}'.format(i)
# Shuffle val data on rank 0 then bcast (may be redundant?)
if MPI.COMM_WORLD.Get_size() > 1:
if MPI.COMM_WORLD.Get_rank() == 0:
self.random.shuffle(self.TF_VALID_RECORD_PATH)
self.TF_VALID_RECORD_PATH = \
MPI.COMM_WORLD.bcast(self.TF_VALID_RECORD_PATH, root=0)
# Shuffle train data on rank 0 then bcast
if MPI.COMM_WORLD.Get_rank() == 0:
self.random.shuffle(self.TF_TRAIN_RECORD_PATH)
self.TF_TRAIN_RECORD_PATH = \
MPI.COMM_WORLD.bcast(self.TF_TRAIN_RECORD_PATH, root=0)
# cls weight distribution
# dict to map from orig key to mod key
key_to_modkey = {v[0]:v[3] for v in self.DISEASE_MAPPER.values()}
# Convert TFLibrary to TFLibrary_REMAP
self.TFLibrary_REMAP = {}
for ikey, iattribute in self.TFLibrary.items():
modkey = key_to_modkey[ikey]
if modkey in self.TFLibrary_REMAP:
self.TFLibrary_REMAP[modkey][0] += iattribute[0]
self.TFLibrary_REMAP[modkey][1] += iattribute[1]
else:
self.TFLibrary_REMAP[modkey] = iattribute[:2]
# convert DISEASE_MAPPER value tuple to dict
# dmap = map of modkey to mod index
# dict to map modkey to sequential indices # {'bpy':1, ...}
self.dmap = {key_to_modkey[v[0]]:orig_to_remap[v[1]] \
for k, v in self.DISEASE_MAPPER.items()}
# Ensure no indices are duplicated
cls_lbl = [v for k, v in self.dmap.items()]
assert len(set(cls_lbl)) == len(cls_lbl), "Error: Duplicate cls label"
assert (max(cls_lbl)+1 == len(cls_lbl) and min(cls_lbl) == 0), \
"Error: Class label not consecutive from 0 - # labels"
cls_pop = np.zeros(len(cls_lbl), dtype=int)
for imodkey, imod in self.dmap.items():
n_train_data = len(self.TFLibrary_REMAP[imodkey][0])
cls_pop[imod] += n_train_data
cls_pop[cls_pop == 0] = -1
cls_weights = 1.0/cls_pop
cls_weights[cls_pop < 0] = 0
cls_pop[cls_pop < 0] = 0
cls_weights = cls_weights/max(np.max(cls_weights),1e-16)
self.loss_clsweights1 = list(cls_weights)
## OTHER METADATA ##
self.image_size = (24,320,320,1) # Orig size
if self.testing:
self.downsample_size = (24,128,128,1) # Downscale x-y
self.crop_size = (24,128,128,1) # be careful if using unet maxpool
else:
self.downsample_size = (24,256,256,1) # Downscale x-y
self.crop_size = (24,256,256,1) # be careful if using unet maxpool
self.output_classes1 = len(cls_lbl)
if self.water_mask:
self.output_segclasses = 3 # BG=0/Tumor=1/Water=2
else:
self.output_segclasses = 2
self.input_channels = 4 # [t1, t1c, t2, dwi]
self.nloc = 4
self.nage = 6 # None + >0,10,20,40,60
self.nsex = 3 # None + M,F
self.batchset = { \
'train' : (self.TF_TRAIN_RECORD_PATH, self.TOTAL_TRAIN_DATA), \
'valid' : (self.TF_VALID_RECORD_PATH, self.TOTAL_VALID_DATA)}
self.mask_size = self.crop_size[0:-1] + (self.output_segclasses,)
#segw = 1.0/np.array([11731282776, 52431861, 13562310])
#segw = segw/np.sum(segw) [0.00091765 0.20531911 0.79376324]
self.loss_segweights = [0.001, 0.3, 1.0]
if not self.water_mask:
self.loss_segweights = [0.001, 1.3]
# Wasserstein
reldisease = {}
for iset in self.disease_set:
for idisease in iset:
reldisease[idisease] = iset
self.tree_M = np.ones( \
(self.output_classes1, self.output_classes1), dtype=np.float32)
for idisease in self.dmap.keys():
if idisease not in reldisease.keys():
id = self.dmap[idisease]
self.tree_M[id,id] = 0
else:
for ireldisease in reldisease[idisease]:
id = self.dmap[idisease]
ir = self.dmap[ireldisease]
if id == ir:
self.tree_M[id,ir] = 0
else:
self.tree_M[id,ir] = 0.5
# Big grouping
# Fine group index to parent fine group index
self.fmap_to_lmap = {k:k for k in range(self.output_classes1)}
for iset in self.disease_set:
lid = self.dmap[iset[0]]
for idisease in iset[1:]:
fid = self.dmap[idisease]
self.fmap_to_lmap[fid] = lid
bmap = set(k for k in self.fmap_to_lmap.values())
self.output_classes0 = len(bmap)
# Parent fine group index to big group index
self.lmap_to_bmap = {v:k for k,v in enumerate(sorted(bmap))}
# Fine group index to big group index
self.fmap_to_bmap = np.zeros(len(self.fmap_to_lmap), dtype=np.int32)
for fmap, lmap in self.fmap_to_lmap.items():
self.fmap_to_bmap[fmap] = self.lmap_to_bmap[lmap]
#self.fmap_to_bmap = \
# {k:self.lmap_to_bmap[v] for k,v in self.fmap_to_lmap.items()}
self.nclass_mat = self.neg_matcoeff*np.ones( \
(self.output_classes0, self.output_classes1), dtype=np.float32)
for fid in range(self.output_classes1):
self.nclass_mat[self.fmap_to_bmap[fid],fid] = self.pos_matcoeff
self.nclass_mat = np.transpose(self.nclass_mat)
# Big class loss weights
self.loss_clsweights0 = np.zeros( \
self.output_classes0 ,dtype=np.float32)
for fid in range(self.output_classes1):
self.loss_clsweights0[self.fmap_to_bmap[fid]] += \
1.0/self.loss_clsweights1[fid]
self.loss_clsweights0 = 1.0/self.loss_clsweights0
self.loss_clsweights0 /= np.max(self.loss_clsweights0)
if MPI.COMM_WORLD.Get_rank() == 0:
print("DATASET DIR : ", self.ROOT)
print("ANATOMIES : ", anatomies)
print("BIOPSY ONLY : ", biopsy_only)
print("MASK ONLY : ", mask_only)
print("CROP SIZE : ", self.crop_size)
print("NCLASS0 : ", self.output_classes0)
print("NCLASS1 : ", self.output_classes1)
np.set_printoptions(linewidth=256)
print("NCLASSMAT :\n%s" % np.transpose(self.nclass_mat))
def listDataFiles(self, batchname):
print("DATASET_NAME: ", batchname)
for i, ifile in enumerate(self.batchset[batchname][0]):
print("%5d %s" % (i, ifile))
def getDataCount(self):
sorted_info = [(self.dmap[k], k, len(v[0]), len(v[1]), \
self.loss_clsweights1[self.dmap[k]]) \
for k,v in self.TFLibrary_REMAP.items()]
sorted_info = sorted(sorted_info, key=lambda x: x[0])
print("%-2s %-10s %-5s %-5s %-7s" % \
("#", "cls_name", "n_trn", "n_val", "cls_wt"))
for c_label, c_name, n_train, n_valid, c_weight in sorted_info:
print("%2d %-10s %5d %5d %7.5f" % \
(c_label, c_name, n_train, n_valid, c_weight))
bmap_to_fmap = [[] for k in range(self.output_classes0)]
for fid in range(self.output_classes1):
bmap_to_fmap[self.fmap_to_bmap[fid]] += [fid]
print("\nBig Classification")
print("%-2s %-10s %-7s %-2s %-10s" % \
("#", "bcls_name", "bcls_wt", "#", "fcls_name"))
for bid, fids in enumerate(bmap_to_fmap):
first_fid = True
for fid in fids:
if first_fid:
b_name = sorted_info[fid][1]
b_wt = ("%7.5f" % self.loss_clsweights0[bid])
c_name = b_name
first_fid = False
else:
bid = ""
b_name= ""
b_wt = ""
c_name = sorted_info[fid][1]
print("%2s %-10s %7s %2s %-10s" % \
(str(bid), b_name, b_wt, fid, c_name))
def getDataSize(self, batchname):
return self.batchset[batchname][1]
def getNLoc(self):
return self.nloc
def getNChannels(self):
return self.input_channels
def getOrigSize(self):
return self.image_size
def getCropSize(self):
return self.crop_size
def getMaskSize(self):
return self.mask_size
def getInputChannels(self):
return self.input_channels
def getOutputClasses0(self):
return self.output_classes0
def getOutputClasses1(self):
return self.output_classes1
def getOutputSegClasses(self):
return self.output_segclasses
def getValidBatchSize(self):
return self.valid_batch_size
def getTrainBatchSize(self):
return self.train_batch_size
def getTestBatchSize(self):
return self.test_batch_size
def getLossClsWeights1(self):
return self.loss_clsweights1
def getLossClsWeights0(self):
return self.loss_clsweights0
def getLossSegWeights(self):
return self.loss_segweights
def getNClassMat(self):
return self.nclass_mat
def getSize(self, batchname):
return self.batchset[batchname][1]
def readDecode(self, dataset_in):
# Read raw TFRecord string and decode them into tensor etc. objects
# Parse TFRecord entries
feature_list = { \
'age' : tf.FixedLenFeature([], tf.int64),
'gender' : tf.FixedLenFeature([], tf.int64),
'label' : tf.FixedLenFeature([], tf.int64),
'tail' : tf.FixedLenFeature([], tf.int64),
'cyst' : tf.FixedLenFeature([], tf.int64),
'examNo' : tf.FixedLenFeature([], tf.int64),
'anatomy' : tf.FixedLenFeature([], tf.string),
'mask_t2' : tf.FixedLenFeature([], tf.string),
'mask_tc' : tf.FixedLenFeature([], tf.string),
'mask_sagC' : tf.FixedLenFeature([], tf.string),
'mask_corC' : tf.FixedLenFeature([], tf.string),
't1' : tf.FixedLenFeature([], tf.string),
't2' : tf.FixedLenFeature([], tf.string),
'tc' : tf.FixedLenFeature([], tf.string),
'sagC' : tf.FixedLenFeature([], tf.string),
'corC' : tf.FixedLenFeature([], tf.string),
'sagDim' : tf.FixedLenFeature([], tf.string),
'corDim' : tf.FixedLenFeature([], tf.string),
'filename' : tf.FixedLenFeature([], tf.string),
'adc' : tf.FixedLenFeature([], tf.string),
'dwi1000' : tf.FixedLenFeature([], tf.string)}
features = tf.parse_single_example(dataset_in, features=feature_list)
examno = tf.cast(features['examNo'], tf.int32)
age = tf.cast(features['age'], tf.int32)
sex = tf.cast(features['gender'], tf.int32)
label = tf.cast(features['label'], tf.int32) # Classification index
loc = tf.cast(tf.decode_raw(features['anatomy'], tf.float64), tf.int32)
t1 = tf.decode_raw(features['t1'], tf.int16)
t2 = tf.decode_raw(features['t2'], tf.int16)
tc = tf.decode_raw(features['tc'], tf.int16)
dwi = tf.decode_raw(features['dwi1000'], tf.int16)
mask = tf.decode_raw(features['mask_t2'], tf.int16)
maskC = tf.decode_raw(features['mask_tc'], tf.int16)
loc = tf.reshape(loc, (self.nloc,), name="LOC_RESHAPE")
t1 = tf.reshape(t1, self.image_size, name="T1_RESHAPE")
t2 = tf.reshape(t2, self.image_size, name="T2_RESHAPE")
tc = tf.reshape(tc, self.image_size, name="TC_RESHAPE")
dwi = tf.reshape(dwi, self.image_size, name="DWI_RESHAPE")
mask = tf.reshape(mask, self.image_size, name="MASK_RESHAPE")
maskC = tf.reshape(maskC, self.image_size, name="MASKC_RESHAPE")
# Get tumor & water masks
zeromask = tf.zeros(self.image_size, dtype=tf.int16)
onesmask = tf.ones(self.image_size, dtype=tf.int16)
# tumor mask (Cast water to 0)
tmask = tf.where(tf.equal(mask, 35), zeromask, mask)
tmaskC = tf.where(tf.equal(maskC, 35), zeromask, maskC)
# water mask
wmask = tf.where(tf.equal(mask, 35), onesmask, zeromask)
wmaskC = tf.where(tf.equal(maskC, 35), onesmask, zeromask)
t1 = tf.cast(t1, tf.float32)
t2 = tf.cast(t2, tf.float32)
tc = tf.cast(tc, tf.float32)
dwi = tf.cast(dwi, tf.float32)
mask = tf.cast(mask, tf.float32)
maskC = tf.cast(maskC, tf.float32)
wmask = tf.cast(wmask, tf.float32)
wmaskC = tf.cast(wmaskC, tf.float32)
""" Make use of any Mask from T2 or TC """
# Merge T1C and T2 mask
heat = tf.round(tf.add(tmask, tmaskC))
heat = tf.cast(tf.cast(heat, tf.bool), tf.int32)
# Merge T1C and T2 mask
wheat = tf.round(tf.add(wmask, wmaskC))
wheat = tf.cast(tf.cast(wheat, tf.bool), tf.int32)
# Remap label from 0-n_class
remap = tf.convert_to_tensor(self.remap, dtype=tf.int32)
label = remap[label]
return t1, t2 ,tc, dwi, heat, wheat, label, examno, age, sex, loc
def dataAugPrep(self, t1, t2, tc, dwi, heat, wheat, \
label, age, sex, training=True):
def age_bin(age):
return tf.cond( \
age < 0, lambda: 0, lambda: tf.cond( \
age < 10, lambda: 1, lambda: tf.cond( \
age < 20, lambda: 2, lambda: tf.cond( \
age < 40, lambda: 3, lambda: tf.cond( \
age < 60, lambda: 4, lambda: 5)))))
def dropSeries(data, kprob, training):
DROP = tf.cond(tf.greater(tf.random_uniform( \
[1], minval=0, maxval=1, dtype=tf.float32)[0], kprob), \
lambda: tf.constant(0, tf.float32), \
lambda: tf.constant(1.0, tf.float32))
data *= DROP
return data
def addNoiseNormalize(data, box, training):
data = tf.divide(data, 255.)
if training:
# Random DC offset
data += tf.random_normal([1], mean=0, stddev=0.06)[0]
# Random brightness scaling
data *= (1.0 + tf.random_normal([1], mean=0, stddev=0.06)[0])
# Random noise
data += tf.random_normal(box, mean=0, stddev=0.07)
data = tf.clip_by_value(data, 0.0, 2.0)
return data
def getInChannels(t1, t2, tc, dwi):
t1_in = tf.cast(tf.greater(tf.reduce_mean(t1), 1.1), tf.int32)
t2_in = tf.cast(tf.greater(tf.reduce_mean(t2), 1.1), tf.int32)
tc_in = tf.cast(tf.greater(tf.reduce_mean(tc), 1.1), tf.int32)
dwi_in = tf.cast(tf.greater(tf.reduce_mean(dwi), 1.1), tf.int32)
return tf.stack([t1_in, t2_in, tc_in, dwi_in], axis=0)
# TEMP convert heat to float32 for rotation etc.
heat = tf.cast(heat, tf.float32)
wheat = tf.cast(wheat, tf.float32)
# Downsample
t1, t2, tc, dwi, heat, wheat = TFDownsample( \
t1, t2, tc, dwi, heat, wheat, self.downsample_size, self.image_size)
# Distort input
t1, t2, tc, dwi, heat, wheat = \
TFAugmentation(t1, t2, tc, dwi, heat, wheat, \
image_shape=self.downsample_size, crop_shape=self.crop_size, \
Training=training)
# Drop series
t1 = dropSeries(t1 , self.series_kprob[0], training)
t2 = dropSeries(t2 , self.series_kprob[1], training)
tc = dropSeries(tc , self.series_kprob[2], training)
dwi = dropSeries(dwi, self.series_kprob[3], training)
# Get InChannels
channels_in = getInChannels(t1, t2, tc, dwi)
# Add noise to records
t1 = addNoiseNormalize(t1 , self.crop_size, training)
t2 = addNoiseNormalize(t2 , self.crop_size, training)
tc = addNoiseNormalize(tc , self.crop_size, training)
dwi = addNoiseNormalize(dwi, self.crop_size, training)
# Convert segmentation mask to [0,1]
heat = tf.round(heat)
heat = tf.cast(tf.cast(heat, tf.bool), tf.int32)
# Convert segmentation mask to [0,1]
wheat = tf.round(wheat)
wheat = tf.cast(tf.cast(wheat, tf.bool), tf.int32)
# Convert classification label to one-hot with '0' as bg mask
label1_onehot = tf.one_hot(label, self.output_classes1, dtype=tf.int32)
# Remap label from 0-n_class
remap = tf.convert_to_tensor(self.fmap_to_bmap, dtype=tf.int32)
label0_onehot = tf.one_hot( \
remap[label], self.output_classes0, dtype=tf.int32)
# BG (no tumor & no water)
heat_bg = tf.cast(tf.cast(tf.add(heat, wheat), tf.bool), tf.int32)
heat_bg = tf.subtract(1, heat_bg)
if self.water_mask:
# Remove overlap between tumor & water by prioritizing tumor
heat_wt = tf.clip_by_value(tf.subtract(tf.add(heat, wheat), 1), 0, 1)
heat_wt = tf.clip_by_value(tf.subtract(wheat, heat_wt), 0, 1)
# Concat mutually-exclusive masks as channels
heat_seg = tf.concat([heat_bg, heat, heat_wt], axis=-1)
else:
heat_seg = tf.concat([heat_bg, 1-heat_bg], axis=-1)
age_onehot = tf.one_hot(age_bin(age), self.nage, dtype=tf.int32)
sex_onehot = tf.one_hot(sex, self.nsex, dtype=tf.int32)
return t1, t2 ,tc, dwi, heat_seg, label0_onehot, label1_onehot, \
age_onehot, sex_onehot, channels_in
def batchAugPrep(self, t1, t2, tc, dwi, heat, wheat, label, \
age, sex, tf_training):
def mapWrapper(debatched_list, tf_training):
t1 = debatched_list[0]
t2 = debatched_list[1]
tc = debatched_list[2]
dwi = debatched_list[3]
heat = debatched_list[4]
wheat = debatched_list[5]
label = debatched_list[6]
age = debatched_list[7]
sex = debatched_list[8]
out = tf.cond(tf_training, \
lambda: self.dataAugPrep( \
t1, t2, tc, dwi, heat, wheat, label, age, sex, training=True), \
lambda: self.dataAugPrep( \
t1, t2, tc, dwi, heat, wheat, label, age, sex, training=False))
return out
batch_tuple = (t1, t2, tc, dwi, heat, wheat, label, age, sex)
batch_dtype = (tf.float32, tf.float32, tf.float32, tf.float32, \
tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.int32)
t1, t2, tc, dwi, heat_seg, label0_onehot, label1_onehot, \
age_onehot, sex_onehot, channels_in = tf.map_fn( \
lambda x: mapWrapper(x, tf_training), batch_tuple, \
dtype=batch_dtype, back_prop=False)
# Fix shape
t1 = tf.reshape(t1, (-1,) + self.crop_size, name="PREP_T1_RESHAPE")
t2 = tf.reshape(t2, (-1,) + self.crop_size, name="PREP_T2_RESHAPE")
tc = tf.reshape(tc, (-1,) + self.crop_size, name="PREP_TC_RESHAPE")
dwi = tf.reshape(dwi, (-1,) + self.crop_size, name="PREP_DWI_RESHAPE")
heat_seg = tf.reshape(heat_seg, (-1,) + self.mask_size, \
name="PREP_HEAT_RESHAPE")
label0_onehot = tf.reshape(label0_onehot, (-1, self.output_classes0), \
name="PREP_LABEL0_RESHAPE")
label1_onehot = tf.reshape(label1_onehot, (-1, self.output_classes1), \
name="PREP_LABEL1_RESHAPE")
age_onehot = tf.reshape(age_onehot, \
(-1,self.nage), name="PREP_NAGE_RESHAPE")
sex_onehot = tf.reshape(sex_onehot, \
(-1,self.nsex), name="PREP_NSEX_RESHAPE")
channels_in = tf.reshape(channels_in, (-1,self.input_channels), \
name="PREP_INCHANNELS_RESHAPE")
return t1, t2, tc, dwi, heat_seg, label0_onehot, label1_onehot, \
age_onehot, sex_onehot, channels_in
def dummyData(self, n=1):
dummy_t = np.zeros((n,) + self.image_size, dtype=np.float32)
dummy_l = np.zeros((n,), dtype=np.int32)
dummy_v = np.zeros((n,self.nloc), dtype=np.int32)
return dummy_t, dummy_t, dummy_t, dummy_t, dummy_t, dummy_t, \
dummy_l, dummy_l, dummy_l, dummy_l, dummy_v
def reshuffleFileList(self, setname):
file_list, num_records = self.batchset[setname]
# Reshuffle root 0 then bcast
if MPI.COMM_WORLD.Get_rank() == 0:
self.random.shuffle(file_list)
file_list = MPI.COMM_WORLD.bcast(file_list, root=0)
self.batchset[setname] = (file_list, num_records)
def generateBatch(self, setname, batchsize, shufflesize, \
shuffle_batch=True, num_shards=1, worker_rank=0, \
repeat=-1, prefetch_gpu=False):
assert (setname in self.batchset), "setname not in batchset"
files = self.batchset[setname][0]
with tf.device('/cpu:0'):
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.shard(num_shards, worker_rank)
if shuffle_batch:
dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat( \
shufflesize, repeat, seed=1234))
else:
dataset = dataset.repeat(repeat)
dataset = dataset.apply( \
tf.contrib.data.map_and_batch(lambda x: self.readDecode(x), \
batchsize, num_parallel_batches=6))
if prefetch_gpu:
dataset = dataset.apply(tf.contrib.data.prefetch_to_device( \
'/gpu:0', buffer_size=1))
else:
dataset = dataset.prefetch(buffer_size=1)
iterator = dataset.make_initializable_iterator()
return iterator
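# Standalone sketch of two bookkeeping steps performed in __init__ above: the original
# DISEASE_MAPPER indices are collapsed onto consecutive class labels, and per-class
# weights are taken as normalized inverse frequencies so the rarest class gets weight
# 1.0. The toy mapper and the per-class counts below are hypothetical; they only
# mirror the (key, orig, mod, modkey) layout used above.
if __name__ == '__main__':
    import numpy as np
    toy_mapper = {
        'a': ('a', 1, 1, ''),
        'b': ('b', 2, 2, ''),
        'c': ('c', 3, 0, 'others'),  # folded into the shared 'others' bucket (mod index 0)
    }
    unq_mod = sorted(set(v[2] for v in toy_mapper.values()))
    mod_to_remap = {mod: remap for remap, mod in enumerate(unq_mod)}
    orig_to_remap = {orig: mod_to_remap[mod] for _, orig, mod, _ in toy_mapper.values()}
    print("orig index -> class label:", orig_to_remap)  # {1: 1, 2: 2, 3: 0}
    # Inverse-frequency class weights, normalized so the rarest class gets 1.0
    cls_pop = np.array([5, 50, 500], dtype=np.float32)  # hypothetical per-class counts
    cls_weights = (1.0 / cls_pop) / np.max(1.0 / cls_pop)
    print("class weights:", cls_weights)  # [1.0, 0.1, 0.01]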
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module implements an interface to enumlib, Gus Hart's excellent Fortran
code for enumerating derivative structures.
This module depends on a compiled enumlib with the executables multienum.x and
makestr.x available in the path. Please download the library at
http://enum.sourceforge.net/ and follow the instructions in the README to
compile these two executables accordingly.
If you use this module, please cite the following:
Gus L. W. Hart and Rodney W. Forcade, "Algorithm for generating derivative
structures," Phys. Rev. B 77 224115 (26 June 2008)
Gus L. W. Hart and Rodney W. Forcade, "Generating derivative structures from
multilattices: Application to hcp alloys," Phys. Rev. B 80 014120 (July 2009)
Gus L. W. Hart, Lance J. Nelson, and Rodney W. Forcade, "Generating
derivative structures at a fixed concentration," Comp. Mat. Sci. 59
101-107 (March 2012)
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Jul 16, 2012"
import re
import math
import subprocess
import itertools
import logging
import numpy as np
from monty.fractions import lcm
from monty.fractions import fractions
from six.moves import reduce
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import DummySpecie
from monty.os.path import which
from monty.dev import requires
from monty.tempfile import ScratchDir
logger = logging.getLogger(__name__)
# Locate Gus Hart's enumeration executable. Older enumlib releases ship it as
# "multienum.x" (used here); newer releases rename it "enum.x", which is why the
# error message below mentions both.
enum_cmd = which('multienum.x')
@requires(enum_cmd and which('makestr.x'),
"EnumlibAdaptor requires the executables 'enum.x' or 'multienum.x' "
"and 'makestr.x' to be in the path. Please download the library at"
"http://enum.sourceforge.net/ and follow the instructions in "
"the README to compile these two executables accordingly.")
class EnumlibAdaptor(object):
"""
An adaptor for enumlib.
.. attribute:: structures
List of all enumerated structures.
"""
amount_tol = 1e-5
def __init__(self, structure, min_cell_size=1, max_cell_size=1,
symm_prec=0.1, enum_precision_parameter=0.001,
refine_structure=False, check_ordered_symmetry=True):
"""
Initializes the adapter with a structure and some parameters.
Args:
structure: An input structure.
min_cell_size (int): The minimum cell size wanted. Defaults to 1.
max_cell_size (int): The maximum cell size wanted. Defaults to 1.
symm_prec (float): Symmetry precision. Defaults to 0.1.
enum_precision_parameter (float): Finite precision parameter for
enumlib. Default of 0.001 is usually ok, but you might need to
tweak it for certain cells.
refine_structure (bool): If you are starting from a structure that
has been relaxed via some electronic structure code,
it is usually much better to start with symmetry determination
                and then obtain a refined structure. The refined structure has
                cell parameters and atomic positions shifted to the expected
                symmetry positions, which makes it much less sensitive to
                precision issues in enumlib. If you are already starting from an
experimental cif, refinement should have already been done and
it is not necessary. Defaults to False.
check_ordered_symmetry (bool): Whether to check the symmetry of
the ordered sites. If the symmetry of the ordered sites is
                lower, the lowest-symmetry ordered sites are included in the
                enumeration. This is important if the ordered sites break
                symmetry in a way that matters for generating the possible
structures. But sometimes including ordered sites
slows down enumeration to the point that it cannot be
completed. Switch to False in those cases. Defaults to True.
"""
if refine_structure:
finder = SpacegroupAnalyzer(structure, symm_prec)
self.structure = finder.get_refined_structure()
else:
self.structure = structure
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.symm_prec = symm_prec
self.enum_precision_parameter = enum_precision_parameter
self.check_ordered_symmetry = check_ordered_symmetry
def run(self):
"""
Run the enumeration.
"""
#Create a temporary directory for working.
with ScratchDir(".") as d:
logger.debug("Temp dir : {}".format(d))
try:
#Generate input files
self._gen_input_file()
#Perform the actual enumeration
num_structs = self._run_multienum()
#Read in the enumeration output as structures.
if num_structs > 0:
self.structures = self._get_structures(num_structs)
else:
raise ValueError("Unable to enumerate structure.")
except Exception:
import sys
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=10, file=sys.stdout)
def _gen_input_file(self):
"""
Generate the necessary struct_enum.in file for enumlib. See enumlib
documentation for details.
"""
coord_format = "{:.6f} {:.6f} {:.6f}"
# Using symmetry finder, get the symmetrically distinct sites.
fitter = SpacegroupAnalyzer(self.structure, self.symm_prec)
symmetrized_structure = fitter.get_symmetrized_structure()
logger.debug("Spacegroup {} ({}) with {} distinct sites".format(
fitter.get_spacegroup_symbol(),
fitter.get_spacegroup_number(),
len(symmetrized_structure.equivalent_sites))
)
"""
Enumlib doesn"t work when the number of species get too large. To
simplify matters, we generate the input file only with disordered sites
and exclude the ordered sites from the enumeration. The fact that
different disordered sites with the exact same species may belong to
different equivalent sites is dealt with by having determined the
spacegroup earlier and labelling the species differently.
"""
# index_species and index_amounts store mappings between the indices
# used in the enum input file, and the actual species and amounts.
index_species = []
index_amounts = []
#Stores the ordered sites, which are not enumerated.
ordered_sites = []
disordered_sites = []
coord_str = []
for sites in symmetrized_structure.equivalent_sites:
if sites[0].is_ordered:
ordered_sites.append(sites)
else:
sp_label = []
species = {k: v for k, v in sites[0].species_and_occu.items()}
if sum(species.values()) < 1 - EnumlibAdaptor.amount_tol:
                    #Let us first add a dummy element for every single
#site whose total occupancies don't sum to 1.
species[DummySpecie("X")] = 1 - sum(species.values())
for sp in species.keys():
if sp not in index_species:
index_species.append(sp)
sp_label.append(len(index_species) - 1)
index_amounts.append(species[sp] * len(sites))
else:
ind = index_species.index(sp)
sp_label.append(ind)
index_amounts[ind] += species[sp] * len(sites)
sp_label = "/".join(["{}".format(i) for i in sorted(sp_label)])
for site in sites:
coord_str.append("{} {}".format(
coord_format.format(*site.coords),
sp_label))
disordered_sites.append(sites)
def get_sg_info(ss):
finder = SpacegroupAnalyzer(Structure.from_sites(ss), self.symm_prec)
sgnum = finder.get_spacegroup_number()
return sgnum
curr_sites = list(itertools.chain.from_iterable(disordered_sites))
min_sgnum = get_sg_info(curr_sites)
logger.debug("Disorderd sites has sgnum %d" % (
min_sgnum))
        #It could be that some of the ordered sites have a lower symmetry than
#the disordered sites. So we consider the lowest symmetry sites as
#disordered in our enumeration.
self.ordered_sites = []
to_add = []
if self.check_ordered_symmetry:
for sites in ordered_sites:
temp_sites = list(curr_sites) + sites
sgnum = get_sg_info(temp_sites)
if sgnum < min_sgnum:
logger.debug("Adding {} to sites to be ordered. "
"New sgnum {}"
.format(sites, sgnum))
to_add = sites
min_sgnum = sgnum
for sites in ordered_sites:
if sites == to_add:
index_species.append(sites[0].specie)
index_amounts.append(len(sites))
sp_label = len(index_species) - 1
logger.debug("Lowest symmetry {} sites are included in enum."
.format(sites[0].specie))
for site in sites:
coord_str.append("{} {}".format(
coord_format.format(*site.coords),
sp_label))
disordered_sites.append(sites)
else:
self.ordered_sites.extend(sites)
self.index_species = index_species
lattice = self.structure.lattice
output = [self.structure.formula, "bulk"]
for vec in lattice.matrix:
output.append(coord_format.format(*vec))
output.append("{}".format(len(index_species)))
output.append("{}".format(len(coord_str)))
output.extend(coord_str)
output.append("{} {}".format(self.min_cell_size, self.max_cell_size))
output.append(str(self.enum_precision_parameter))
output.append("partial")
ndisordered = sum([len(s) for s in disordered_sites])
base = int(ndisordered*reduce(lcm,
[f.limit_denominator(
ndisordered *
self.max_cell_size).denominator
for f in map(fractions.Fraction,
index_amounts)]))
#base = ndisordered #10 ** int(math.ceil(math.log10(ndisordered)))
#To get a reasonable number of structures, we fix concentrations to the
#range expected in the original structure.
total_amounts = sum(index_amounts)
for amt in index_amounts:
conc = amt / total_amounts
if abs(conc * base - round(conc * base)) < 1e-5:
output.append("{} {} {}".format(int(round(conc * base)),
int(round(conc * base)),
base))
else:
min_conc = int(math.floor(conc * base))
output.append("{} {} {}".format(min_conc - 1, min_conc + 1,
base))
output.append("")
logger.debug("Generated input file:\n{}".format("\n".join(output)))
with open("struct_enum.in", "w") as f:
f.write("\n".join(output))
def _run_multienum(self):
p = subprocess.Popen([enum_cmd],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
output = p.communicate()[0].decode("utf-8")
count = 0
start_count = False
for line in output.strip().split("\n"):
if line.strip().endswith("RunTot"):
start_count = True
elif start_count and re.match("\d+\s+.*", line.strip()):
count = int(line.split()[-1])
logger.debug("Enumeration resulted in {} structures".format(count))
return count
def _get_structures(self, num_structs):
structs = []
rs = subprocess.Popen(["makestr.x",
"struct_enum.out", str(0),
str(num_structs - 1)],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if len(self.ordered_sites) > 0:
original_latt = self.ordered_sites[0].lattice
# Need to strip sites of site_properties, which would otherwise
# result in an index error. Hence Structure is reconstructed in
# the next step.
ordered_structure = Structure(
original_latt,
[site.species_and_occu for site in self.ordered_sites],
[site.frac_coords for site in self.ordered_sites])
inv_org_latt = np.linalg.inv(original_latt.matrix)
for n in range(1, num_structs + 1):
with open("vasp.{:06d}".format(n)) as f:
data = f.read()
data = re.sub("scale factor", "1", data)
data = re.sub("(\d+)-(\d+)", r"\1 -\2", data)
poscar = Poscar.from_string(data, self.index_species)
sub_structure = poscar.structure
#Enumeration may have resulted in a super lattice. We need to
#find the mapping from the new lattice to the old lattice, and
#perform supercell construction if necessary.
new_latt = sub_structure.lattice
sites = []
if len(self.ordered_sites) > 0:
transformation = np.dot(new_latt.matrix, inv_org_latt)
transformation = [[int(round(cell)) for cell in row]
for row in transformation]
logger.debug("Supercell matrix: {}".format(transformation))
s = Structure.from_sites(ordered_structure)
s.make_supercell(transformation)
sites.extend([site.to_unit_cell for site in s])
super_latt = sites[-1].lattice
else:
super_latt = new_latt
for site in sub_structure:
if site.specie.symbol != "X": # We exclude vacancies.
sites.append(PeriodicSite(site.species_and_occu,
site.frac_coords,
super_latt).to_unit_cell)
structs.append(Structure.from_sites(sorted(sites)))
logger.debug("Read in a total of {} structures.".format(num_structs))
return structs
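# Minimal usage sketch (assumes the multienum.x and makestr.x executables are on the
# PATH; the 50/50 disordered Cu/Au cell below is purely illustrative). The adaptor
# enumerates the symmetrically distinct orderings of the disordered sites and exposes
# them through the `structures` attribute after run().
if __name__ == "__main__":
    from pymatgen.core.lattice import Lattice
    disordered = Structure(Lattice.cubic(3.6), [{"Cu": 0.5, "Au": 0.5}], [[0, 0, 0]])
    adaptor = EnumlibAdaptor(disordered, min_cell_size=1, max_cell_size=2)
    adaptor.run()
    print("Enumerated {} ordered structures".format(len(adaptor.structures)))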
|
|
"""
Command-line interface for model evaluation
@author Siddharth Reddy <[email protected]>
"""
from __future__ import division
import click
import logging
import math
import pickle
import os
import pandas as pd
import numpy as np
from lentil import datatools
from lentil import models
from lentil import est
from lentil import evaluate
_logger = logging.getLogger(__name__)
@click.command()
# path to interaction history CSV/pickle input file
@click.argument('history_file', type=click.Path(exists=True))
# path to pickled results file
@click.argument('results_file', type=click.Path(exists=False))
@click.option(
'--verbose', is_flag=True,
help='Makes debug messages visible')
@click.option(
'--using-lessons/--no-using-lessons', default=True,
help='Include embeddings of skill gains from lessons')
@click.option(
'--using-prereqs/--no-using-prereqs', default=True,
help='Include embeddings of prerequisites for lessons')
@click.option(
'--using-bias/--no-using-bias', default=True,
help='Include bias terms in the item response function')
@click.option(
'--embedding-dimension', default=2,
help='Dimensionality of latent skill space')
@click.option(
'--learning-update-variance', default=0.5,
help='Constant variance for Gaussian lesson updates')
@click.option(
'--opt-algo',
type=click.Choice(['l-bfgs-b', 'batch-gd', 'adagrad']),
default='l-bfgs-b',
help='Iterative optimization algorithm used for parameter estimation')
@click.option(
'--regularization-constant', default=1e-6,
help='Coefficient of norm regularization terms')
@click.option(
'--ftol', default=1e-3,
help='Stopping condition for iterative optimization')
@click.option('--learning-rate', default=5e-3, help='Fixed learning rate')
@click.option('--adagrad-eta', default=1e-3, help='Adagrad learning rate')
@click.option('--adagrad-eps', default=0.1, help='Adagrad epsilon')
@click.option('--num-folds', default=10, help='Number of folds in k-fold cross-validation')
@click.option(
'--truncation-style',
type=click.Choice(['random', 'last']),
default='last',
help='Truncate student history at random, or just before last assessment interactions')
def cli(
history_file,
results_file,
verbose,
num_folds,
truncation_style,
using_lessons,
using_prereqs,
using_bias,
embedding_dimension,
learning_update_variance,
opt_algo,
regularization_constant,
ftol,
learning_rate,
adagrad_eta,
adagrad_eps):
"""
This script provides a command-line interface for model evaluation.
It reads an interaction history from file, computes the cross-validated AUC of
an embedding model, and writes the results to file.
The pickled results will be an object of type :py:class:`evaluate.CVResults`
:param str history_file: Input path to CSV/pickle file containing interaction history
:param str results_file: Output path for pickled results of cross-validation
    :param bool verbose: True => logger level set to logging.DEBUG
:param int num_folds: Number of folds in k-fold cross-validation
:param str truncation_style: Hold-out scheme for student histories
:param bool using_lessons: Including lessons in embedding
:param bool using_prereqs: Including lesson prereqs in embedding
:param bool using_bias: Including bias terms in embedding
:param int embedding_dimension: Number of dimensions in latent skill space
:param float learning_update_variance: Variance of Gaussian learning update
:param str opt_algo: Optimization algorithm for parameter estimation
:param float regularization_constant: Coefficient of regularization term in objective function
:param float ftol: Stopping condition for iterative optimization
:param float learning_rate: Fixed learning rate for gradient descent
:param float adagrad_eta: Base learning rate parameter for Adagrad
:param float adagrad_eps: Epsilon parameter for Adagrad
"""
if verbose and opt_algo == 'l-bfgs-b':
raise ValueError('Verbose mode is not currently supported for L-BFGS-B.\
Try turning off verbose mode, or change your choice of optimization algorithm.')
if verbose:
_logger.setLevel(logging.DEBUG)
click.echo('Loading interaction history from %s...' % click.format_filename(history_file))
_, history_file_ext = os.path.splitext(history_file)
if history_file_ext == '.csv':
        history = datatools.InteractionHistory(pd.read_csv(history_file))
elif history_file_ext == '.pkl':
with open(history_file, 'rb') as f:
history = pickle.load(f)
else:
raise ValueError('Unrecognized file extension for history_file.\
Please supply a .csv with an interaction history, or a .pkl file containing\
a datatools.InteractionHistory object.')
embedding_kwargs = {
'embedding_dimension' : embedding_dimension,
'using_lessons' : using_lessons,
'using_prereqs' : using_prereqs,
'using_bias' : using_bias,
'learning_update_variance_constant' : learning_update_variance
}
gradient_descent_kwargs = {
'using_adagrad' : opt_algo == 'adagrad',
'eta' : adagrad_eta,
'eps' : adagrad_eps,
'rate' : learning_rate,
'verify_gradient' : False,
'debug_mode_on' : verbose,
'ftol' : ftol,
'num_checkpoints' : 100
}
estimator = est.EmbeddingMAPEstimator(
regularization_constant=regularization_constant,
using_scipy=(opt_algo == 'l-bfgs-b'),
gradient_descent_kwargs=gradient_descent_kwargs,
verify_gradient=False,
debug_mode_on=verbose,
ftol=ftol)
def build_embedding(
embedding_kwargs,
estimator,
history,
filtered_history,
split_history=None):
model = models.EmbeddingModel(history, **embedding_kwargs)
estimator.filtered_history = filtered_history
if split_history is not None:
estimator.split_history = split_history
model.fit(estimator)
return model
model_builders = {
'model' : (lambda *args, **kwargs: build_embedding(
embedding_kwargs,
estimator,
*args,
**kwargs))
}
click.echo(
'Computing cross-validated AUC (num_folds=%d, truncation_style=%s)...' % (
num_folds,
truncation_style))
results = evaluate.cross_validated_auc(
model_builders,
history,
num_folds=num_folds,
random_truncations=(truncation_style == 'random'))
train_auc_mean = results.training_auc_mean('model')
val_auc_mean = results.validation_auc_mean('model')
train_auc_stderr = results.training_auc_stderr('model')
val_auc_stderr = results.validation_auc_stderr('model')
click.echo('AUCs with 95% confidence intervals:')
click.echo('Training AUC = %f (%f, %f)' % (
train_auc_mean,
train_auc_mean - 1.96 * train_auc_stderr,
train_auc_mean + 1.96 * train_auc_stderr))
click.echo('Validation AUC = %f (%f, %f)' % (
val_auc_mean,
val_auc_mean - 1.96 * val_auc_stderr,
val_auc_mean + 1.96 * val_auc_stderr))
with open(results_file, 'wb') as f:
pickle.dump(results, f, pickle.HIGHEST_PROTOCOL)
click.echo('Results written to %s' % results_file)
if __name__ == '__main__':
cli()
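# Usage sketch (file names are hypothetical): besides the shell, the option parsing
# above can be exercised programmatically through click's test runner, e.g.
#
#     from click.testing import CliRunner
#     runner = CliRunner()
#     result = runner.invoke(cli, ['history.csv', 'results.pkl',
#                                  '--num-folds', '5', '--truncation-style', 'random'])
#     print(result.output)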
|
|
from datetime import date
from datetime import datetime
from dateutil import tz
import time
def nordic2Arrival(data, arrival_id):
"""
Function for converting a nordic file into a Arrival string
:param NordicData data: NordicData object to be converted
:param int arrival_id: arrival id of the assoc
:param int origin_id: origin id of the origin
:returns: arrival_string
"""
arrival_string = ""
station_code = data.station_code
ar_time = data.observation_time.replace(tzinfo=tz.tzutc()).timestamp()
jdate = data.observation_time.date()
station_assoc_id = -1
channel_id = -1
if data.sp_component is not None:
channel = data.sp_component.lower()
else:
channel = '-'
if channel == 'h':
channel = 'z'
if data.phase_type is not None:
iphase = data.phase_type
else:
iphase = '-'
stype = "-"
deltime = -1.0
azimuth = -1.0
delaz = -1.0
if data.apparent_velocity is not None:
slow = 110.7 / data.apparent_velocity
else:
slow = -1.0
delslo = -1.0
ema = -1.0
rect = -1.0
if data.max_amplitude is not None:
amp = data.max_amplitude
else:
amp = -1.0
if data.max_amplitude_period is not None:
per = data.max_amplitude_period
else:
per = -1.0
logat = -1.0
clip = '-'
fm = '-'
snr = -1.0
if data.quality_indicator is not None:
qual = data.quality_indicator.lower()
else:
qual = '-'
auth = '-'
commid = 1
lddate = '-'
a_format = (
"{sta:6s} {ar_time:17.5f} {arid:8d} {jdate:8d} {stassid:8d} "
"{chanid:8d} {chan:8s} {iphase:8s} {stype:1s} {deltim:6.3f} "
"{azimuth:7.2f} {delaz:7.2f} {slow:7.2f} {delslo:7.2f} "
"{ema:7.2f} {rect:7.3f} {amp:10.1f} {per:7.2f} {logat:7.2f} "
"{clip:1s} {fm:2s} {snr:10.2f} {qual:1s} {auth:15s} {commid:8d} "
"{lddate:17s}"
)
arrival_string = a_format.format(
sta = station_code,
ar_time = ar_time,
arid = arrival_id,
jdate = int(jdate.strftime("%Y%j")),
stassid = station_assoc_id,
chanid = channel_id,
chan = channel,
iphase = iphase,
stype = stype,
deltim = deltime,
azimuth = -1.0,
delaz = delaz,
slow = slow,
delslo = delslo,
ema = ema,
rect = rect,
amp = amp,
per = per,
logat = logat,
clip = clip,
fm = fm,
snr = snr,
qual = qual,
auth = auth,
commid = commid,
lddate = lddate
)
return arrival_string
def nordic2Assoc(data, arrival_id, origin_id):
"""
Function for converting a nordic file into a Assoc string
:param NordicData data: NordicData object to be converted
:param int arrival_id: arrival id of the assoc
:param int origin_id: origin id of the origin
:returns: assoc string
"""
assoc_string = ""
station_code = data.station_code
phase = "-"
belief = -1.0
if data.epicenter_distance is not None:
delta = data.epicenter_distance
else:
delta = -1.0
station_to_event_azimuth = -1.0
if data.epicenter_to_station_azimuth is not None:
event_to_station_azimuth = data.epicenter_to_station_azimuth
else:
event_to_station_azimuth = -1.0
if data.travel_time_residual is not None:
time_residual = data.travel_time_residual
else:
time_residual = -1.0
time_def = '-'
if data.location_weight is not None:
weight = data.location_weight
else:
weight = -1.0
azimuth_residual = -1.0
azimuth_def = '-'
slowness_residual = -1.0
slowness_def = '-'
ema_residual = -999.0
vmodel = '-'
commid = -1
lddate = '-'
a_format = (
"{arid:8d} {orid:8d} {sta:6s} {phase:8s} {belief:4.2f} "
"{delta:8.3f} {seaz:7.2f} {esaz:7.2f} {time_residual:8.3f} "
"{time_def:1s} {azres:7.1f} {azimuth_def:1s} "
"{slowness_residual:7.2f} {slowness_def:1s} {ema_residual:7.1f} "
"{weight:6.3f} {vmodel:15s} {commid:8d} {lddate:17s}\n"
)
assoc_string += a_format.format (
arid = arrival_id,
orid = origin_id,
sta = station_code,
phase = phase,
belief = belief,
delta = delta,
seaz = station_to_event_azimuth,
esaz = event_to_station_azimuth,
time_residual = time_residual,
time_def = time_def,
azres = azimuth_residual,
azimuth_def = azimuth_def,
slowness_residual = slowness_residual,
slowness_def = slowness_def,
ema_residual = ema_residual,
weight = weight,
vmodel = vmodel,
commid = commid,
lddate = lddate
)
return assoc_string
def nordic2Origin(main_h, origin_id):
"""
Function for converting a nordic file into a Origin string
:param NordicMain main_h: NordicMain object to be converted
"""
origin_string = ""
latitude = main_h.epicenter_latitude
longitude = main_h.epicenter_longitude
depth = main_h.depth
ar_time = datetime.combine(main_h.origin_date, main_h.origin_time).replace(tzinfo=tz.tzutc()).timestamp()
jdate = main_h.origin_date
nass = -1
ndef = main_h.stations_used
npd = -1
grn = -1
srn = -1
etype = "-"
depdp = -999.0
if main_h.depth_control == 'F':
dtype = 'g'
elif main_h.depth_control == ' ':
dtype = "f"
else:
dtype = '-'
mb = -1.0
mbid = -1
ms = -1.0
msid = -1
ml = -1.0
mlid = -1
if main_h.type_of_magnitude_1 == 'L':
ml = main_h.magnitude_1
elif main_h.type_of_magnitude_1 == 'B':
mb = main_h.magnitude_1
elif main_h.type_of_magnitude_1 == 'S':
ms = main_h.magnitude_1
if main_h.type_of_magnitude_2 == 'L':
ml = main_h.magnitude_2
elif main_h.type_of_magnitude_2 == 'B':
mb = main_h.magnitude_2
elif main_h.type_of_magnitude_2 == 'S':
ms = main_h.magnitude_2
if main_h.type_of_magnitude_3 == 'L':
ml = main_h.magnitude_3
elif main_h.type_of_magnitude_3 == 'B':
mb = main_h.magnitude_3
elif main_h.type_of_magnitude_3 == 'S':
ms = main_h.magnitude_3
algorithm = "-"
auth = "-"
commid = -1
lddate = "-"
o_format = ("{latitude:9.4f} {longitude:9.4f} {depth:9.4f} {ar_time:17.5f} "
"{orid:8d} {evid:8d} {jdate:8d} {nass:4d} {ndef:4d} {npd:4d} "
"{grn:8d} {srn:8d} {etype:7s} {depdp:9.4f} {dtype:1s} {mb:7.2f} "
"{mbid:8d} {ms:7.2f} {msid:8d} {ml:7.2f} {mlid:8d} "
"{algorithm:15s} {auth:15s} {commid:8d} {lddate:17s}\n")
origin_string = o_format.format(latitude = latitude,
longitude = longitude,
depth = depth,
ar_time = ar_time,
orid = origin_id,
evid = -1,
jdate = int(jdate.strftime("%Y%j")),
nass = nass,
ndef = ndef,
npd = npd,
grn = grn,
srn = srn,
etype = etype,
depdp = depdp,
dtype = dtype,
mb = mb,
mbid = mbid,
ms = ms,
msid = msid,
ml = ml,
mlid = mlid,
algorithm = algorithm,
auth = auth,
commid = commid,
lddate = lddate)
return origin_string
def nordic2css(nordic_event, css_filename):
"""
    Function for converting a nordic event into css format and writing it into origin, assoc and arrival files.
"""
origin_id = 1
arrival_id = 1
origin_string = nordic2Origin(nordic_event.main_h[0],
origin_id)
arrival_string = ""
assoc_string = ""
for data in nordic_event.data:
arrival_string += nordic2Arrival(data, arrival_id) + "\n"
assoc_string += nordic2Assoc(data, arrival_id, origin_id) + "\n"
arrival_id += 1
origin_file = open(css_filename + ".origin", "w")
origin_file.write(origin_string)
origin_file.close()
arrival_file = open(css_filename + ".arrival", "w")
arrival_file.write(arrival_string)
arrival_file.close()
assoc_file = open(css_filename + ".assoc", "w")
assoc_file.write(assoc_string)
assoc_file.close()
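# Minimal demonstration sketch: the attribute names below simply mirror those read by
# nordic2Arrival above (a real NordicData object would come from the nordic reader),
# and the values are made up. Running it prints a single fixed-width arrival row.
if __name__ == '__main__':
    from types import SimpleNamespace
    demo_phase = SimpleNamespace(
        station_code='HEL',
        observation_time=datetime(2017, 3, 1, 12, 30, 15),
        sp_component='Z',
        phase_type='P',
        apparent_velocity=8.1,
        max_amplitude=12.5,
        max_amplitude_period=0.4,
        quality_indicator='I')
    print(nordic2Arrival(demo_phase, arrival_id=1))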
|
|
## This code is written by Davide Albanese, <[email protected]>.
## (C) 2008 mlpy Developers.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['error', 'error_p', 'error_n', 'accuracy',
'sensitivity', 'specificity', 'ppv', 'npv',
'mcc', 'auc_wmw', 'mse', 'r2', 'r2_corr']
import numpy as np
"""
Compute metrics for assessing the performance of
classification/regression models.
The Confusion Matrix:
Total Samples (ts) | Actual Positives (ap) | Actual Negatives (an)
------------------------------------------------------------------------
Predicted Positives (pp) | True Positives (tp) | False Positives (fp)
------------------------------------------------------------------------
Predicted Negatives (pn) | False Negatives (fn) | True Negatives (tn)
"""
def is_binary(x):
ux = np.unique(x)
for elem in ux:
if elem not in [-1, 1]:
return False
return True
def true_pos(t, p):
w = (t == 1)
return (t[w] == p[w]).sum()
def true_neg(t, p):
w = (t == -1)
return (t[w] == p[w]).sum()
def false_pos(t, p):
w = (t == -1)
return (t[w] != p[w]).sum()
def false_neg(t, p):
w = (t == 1)
return (t[w] != p[w]).sum()
def error(t, p):
"""Error for binary and multiclass classification
problems.
:Parameters:
t : 1d array_like object integer
target values
p : 1d array_like object integer
predicted values
:Returns:
error : float, in range [0.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.int)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
return (tarr != parr).sum() / float(tarr.shape[0])
def accuracy(t, p):
"""Accuracy for binary and multiclass classification
problems.
:Parameters:
t : 1d array_like object integer
target values
p : 1d array_like object integer
predicted values
:Returns:
accuracy : float, in range [0.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.int)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
return (tarr == parr).sum() / float(tarr.shape[0])
def error_p(t, p):
"""Compute the positive error as:
error_p = fn / ap
Only binary classification problems with t[i] = -1/+1
are allowed.
:Parameters:
t : 1d array_like object integer (-1/+1)
target values
p : 1d array_like object integer (-1/+1)
predicted values
:Returns:
errorp : float, in range [0.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.int)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
if not is_binary(tarr):
raise ValueError("only binary classification problems"
" with t[i] = -1/+1 are allowed.")
fn = false_neg(tarr, parr)
ap = float((true_pos(tarr, parr) + fn))
if ap == 0:
return 0.0
return fn / ap
def error_n(t, p):
"""Compute the negative error as:
error_n = fp / an
Only binary classification problems with t[i] = -1/+1
are allowed.
:Parameters:
t : 1d array_like object integer (-1/+1)
target values
p : 1d array_like object integer (-1/+1)
predicted values
:Returns:
       errorn : float, in range [0.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.int)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
if not is_binary(tarr):
raise ValueError("only binary classification problems"
" with t[i] = -1/+1 are allowed.")
fp = false_pos(tarr, parr)
an = float((true_neg(tarr, parr) + fp))
if an == 0:
return 0.0
return fp / an
def sensitivity(t, p):
"""Sensitivity, computed as:
sensitivity = tp / ap
Only binary classification problems with t[i] = -1/+1
are allowed.
:Parameters:
t : 1d array_like object integer (-1/+1)
target values
p : 1d array_like object integer (-1/+1)
predicted values
:Returns:
sensitivity : float, in range [0.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.int)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
if not is_binary(tarr):
raise ValueError("only binary classification problems"
" with t[i] = -1/+1 are allowed.")
tp = true_pos(tarr, parr)
ap = float((tp + false_neg(tarr, parr)))
if ap == 0:
return 0.0
return tp / ap
def specificity(t, p):
"""Specificity, computed as:
specificity = tn / an
Only binary classification problems with t[i] = -1/+1
are allowed.
:Parameters:
t : 1d array_like object integer (-1/+1)
target values
p : 1d array_like object integer (-1/+1)
predicted values
:Returns:
       specificity : float, in range [0.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.int)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
if not is_binary(tarr):
raise ValueError("only binary classification problems"
" with t[i] = -1/+1 are allowed.")
tn = true_neg(tarr, parr)
an = float((false_pos(tarr, parr) + tn))
if an == 0:
return 0.0
return tn / an
def ppv(t, p):
"""Positive Predictive Value (PPV) computed as:
ppv = tp / pp
Only binary classification problems with t[i] = -1/+1
are allowed.
:Parameters:
t : 1d array_like object integer (-1/+1)
target values
p : 1d array_like object integer (-1/+1)
predicted values
:Returns:
PPV : float, in range [0.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.int)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
if not is_binary(tarr):
raise ValueError("only binary classification problems"
" with t[i] = -1/+1 are allowed.")
tp = true_pos(tarr, parr)
pp = float((tp + false_pos(tarr, parr)))
if pp == 0:
return 0.0
return tp / pp
def npv(t, p):
"""Negative Predictive Value (NPV), computed as:
npv = tn / pn
Only binary classification problems with t[i] = -1/+1
are allowed.
:Parameters:
t : 1d array_like object integer (-1/+1)
target values
p : 1d array_like object integer (-1/+1)
predicted values
:Returns:
NPV : float, in range [0.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.int)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
if not is_binary(tarr):
raise ValueError("only binary classification problems"
" with t[i] = -1/+1 are allowed.")
tn = true_neg(tarr, parr)
pn = float((tn + false_neg(tarr, parr)))
if pn == 0:
return 0.0
return tn / pn
def mcc(t, p):
"""Matthews Correlation Coefficient (MCC), computed as:
MCC = ((tp*tn)-(fp*fn)) / sqrt((tp+fn)*(tp+fp)*(tn+fn)*(tn+fp))
Only binary classification problems with t[i] = -1/+1 are allowed.
Returns a value between -1 and +1. A MCC of +1 represents
a perfect prediction, 0 an average random prediction and
-1 an inverse prediction.
If any of the four sums in the denominator is zero,
the denominator is set to one; this results in a Matthews
Correlation Coefficient of zero, which can be shown to be
the correct limiting value.
:Parameters:
t : 1d array_like object integer (-1/+1)
target values
p : 1d array_like object integer (-1/+1)
predicted values
:Returns:
MCC : float, in range [-1.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.int)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
if not is_binary(tarr):
raise ValueError("only binary classification problems"
" with t[i] = -1/+1 are allowed.")
tp = true_pos(tarr, parr)
tn = true_neg(tarr, parr)
fp = false_pos(tarr, parr)
fn = false_neg(tarr, parr)
den = np.sqrt((tp+fn)*(tp+fp)*(tn+fn)*(tn+fp))
if den == 0.0:
den = 1.0
num = np.float((tp*tn)-(fp*fn))
return num / den
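# Worked example of the formula above: tp=4, tn=3, fp=1, fn=2 gives
# num = 4*3 - 1*2 = 10 and den = sqrt(6*5*5*4) = sqrt(600) ~= 24.49,
# so MCC ~= 0.41 (moderately better than a random prediction).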
def auc_wmw(t, p):
"""Compute the AUC by using the Wilcoxon-Mann-Whitney
statistic. Only binary classification problems with
t[i] = -1/+1 are allowed.
:Parameters:
t : 1d array_like object integer (-1/+1)
target values
p : 1d array_like object (negative/positive values)
predicted values
:Returns:
AUC : float, in range [0.0, 1.0]
"""
tarr = np.asarray(t, dtype=np.int)
parr = np.asarray(p, dtype=np.float)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
if not is_binary(tarr):
raise ValueError("only binary classification problems"
" with t[i] = -1/+1 are allowed.")
idxp = np.where(tarr == 1)[0]
idxn = np.where(tarr == -1)[0]
auc = 0.0
for i in idxp:
for j in idxn:
if (p[i] - p[j]) > 0.0:
auc += 1.0
return auc / float(idxp.shape[0] * idxn.shape[0])
def mse(t, p):
"""Mean Squared Error (MSE).
:Parameters:
t : 1d array_like object
target values
p : 1d array_like object
predicted values
:Returns:
MSE : float
"""
tarr = np.asarray(t, dtype=np.float)
parr = np.asarray(p, dtype=np.float)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
n = tarr.shape[0]
return np.sum((tarr - parr)**2) / n
def r2(t, p):
"""Coefficient of determination (R^2)
computed as 1 - (sserr/sstot), where `sserr` is
the sum of squares of residuals and `sstot` is
the total sum of squares.
:Parameters:
t : 1d array_like object
target values
p : 1d array_like object
predicted values
:Returns:
R^2 : float
"""
tarr = np.asarray(t, dtype=np.float)
parr = np.asarray(p, dtype=np.float)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
sserr = np.sum((tarr - parr)**2)
sstot = np.sum((tarr - tarr.mean())**2)
return 1. - (sserr / sstot)
def r2_corr(t, p):
"""Coefficient of determination (R^2)
computed as square of the correlation
coefficient.
:Parameters:
t : 1d array_like object
target values
p : 1d array_like object
predicted values
:Returns:
R^2 : float
"""
tarr = np.asarray(t, dtype=np.float)
parr = np.asarray(p, dtype=np.float)
if tarr.shape[0] != parr.shape[0]:
raise ValueError("t, p: shape mismatch")
return np.corrcoef(parr, tarr)[0,1]**2
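# Usage sketch: the toy labels below give tp=2, tn=2, fp=1, fn=1, so accuracy is 4/6
# and sensitivity/specificity are both 2/3. Note the functions above use the old
# np.int/np.float aliases, so this assumes a NumPy version that still provides them.
if __name__ == '__main__':
    t = [1, 1, 1, -1, -1, -1]
    p = [1, 1, -1, -1, -1, 1]
    print("accuracy    =", accuracy(t, p))     # 0.666...
    print("sensitivity =", sensitivity(t, p))  # 0.666...
    print("specificity =", specificity(t, p))  # 0.666...
    print("MCC         =", mcc(t, p))          # 0.333...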
|
|
# -*- coding: utf-8 -*-
"""
sphinx.util.nodes
~~~~~~~~~~~~~~~~~
Docutils node-related utility functions for Sphinx.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from six import text_type
from docutils import nodes
from sphinx import addnodes
from sphinx.locale import pairindextypes
class WarningStream(object):
def __init__(self, warnfunc):
self.warnfunc = warnfunc
self._re = re.compile(r'\((DEBUG|INFO|WARNING|ERROR|SEVERE)/[0-4]\)')
def write(self, text):
text = text.strip()
if text:
self.warnfunc(self._re.sub(r'\1:', text), None, '')
# \x00 means the "<" was backslash-escaped
explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
caption_ref_re = explicit_title_re # b/w compat alias
def apply_source_workaround(node):
if node.source and node.rawsource:
return
# workaround: nodes.term doesn't have source, line and rawsource
# (fixed in Docutils r7495)
if isinstance(node, nodes.term):
definition_list_item = node.parent
if definition_list_item.line is not None:
node.source = definition_list_item.source
node.line = definition_list_item.line - 1
node.rawsource = definition_list_item. \
rawsource.split("\n", 2)[0]
return
# workaround: docutils-0.10.0 or older's nodes.caption for nodes.figure
# and nodes.title for nodes.admonition doesn't have source, line.
# this issue was filed to Docutils tracker:
# sf.net/tracker/?func=detail&aid=3599485&group_id=38414&atid=422032
# sourceforge.net/p/docutils/patches/108/
if (isinstance(node, (
nodes.caption,
nodes.title,
nodes.rubric,
nodes.line,
))):
node.source = find_source_node(node)
node.line = 0  # need a docutils fix before `node.line` can be used here
return
IGNORED_NODES = (
nodes.Invisible,
nodes.Inline,
nodes.literal_block,
nodes.doctest_block,
# XXX there are probably more
)
def is_translatable(node):
if isinstance(node, nodes.TextElement):
apply_source_workaround(node)
if not node.source:
return False # built-in message
if isinstance(node, IGNORED_NODES) and 'translatable' not in node:
return False
# <field_name>orphan</field_name>
# XXX ignore all metadata (== docinfo)
if isinstance(node, nodes.field_name) and node.children[0] == 'orphan':
return False
return True
if isinstance(node, nodes.image) and node.get('translatable'):
return True
return False
LITERAL_TYPE_NODES = (
nodes.literal_block,
nodes.doctest_block,
nodes.raw,
)
IMAGE_TYPE_NODES = (
nodes.image,
)
def extract_messages(doctree):
"""Extract translatable messages from a document tree."""
for node in doctree.traverse(is_translatable):
if isinstance(node, LITERAL_TYPE_NODES):
msg = node.rawsource
if not msg:
msg = node.astext()
elif isinstance(node, IMAGE_TYPE_NODES):
msg = '.. image:: %s' % node['uri']
if node.get('alt'):
msg += '\n :alt: %s' % node['alt']
else:
msg = node.rawsource.replace('\n', ' ').strip()
# XXX nodes rendering empty are likely a bug in sphinx.addnodes
if msg:
yield node, msg
def find_source_node(node):
for pnode in traverse_parent(node):
if pnode.source:
return pnode.source
def traverse_parent(node):
while node:
yield node
node = node.parent
def traverse_translatable_index(doctree):
"""Traverse translatable index node from a document tree."""
def is_block_index(node):
return isinstance(node, addnodes.index) and \
node.get('inline') is False
for node in doctree.traverse(is_block_index):
if 'raw_entries' in node:
entries = node['raw_entries']
else:
entries = node['entries']
yield node, entries
def nested_parse_with_titles(state, content, node):
"""Version of state.nested_parse() that allows titles and does not require
titles to have the same decoration as the calling document.
This is useful when the parsed content comes from a completely different
context, such as docstrings.
"""
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
try:
return state.nested_parse(content, 0, node, match_titles=1)
finally:
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
def clean_astext(node):
"""Like node.astext(), but ignore images."""
node = node.deepcopy()
for img in node.traverse(nodes.image):
img['alt'] = ''
return node.astext()
def split_explicit_title(text):
"""Split role content into title and target, if given."""
match = explicit_title_re.match(text)
if match:
return True, match.group(1), match.group(2)
return False, text, text
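# Hedged usage sketch (illustrative helper, not part of the original module):
# role content of the form "Some title <some-target>" is split into title and
# target; content without an explicit title is returned unchanged for both.
def _split_explicit_title_example():
    # -> (True, 'Some title', 'some-target')
    explicit = split_explicit_title('Some title <some-target>')
    # -> (False, 'plain-target', 'plain-target')
    implicit = split_explicit_title('plain-target')
    return explicit, implicit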
indextypes = [
'single', 'pair', 'double', 'triple', 'see', 'seealso',
]
def process_index_entry(entry, targetid):
indexentries = []
entry = entry.strip()
oentry = entry
main = ''
if entry.startswith('!'):
main = 'main'
entry = entry[1:].lstrip()
for type in pairindextypes:
if entry.startswith(type+':'):
value = entry[len(type)+1:].strip()
value = pairindextypes[type] + '; ' + value
indexentries.append(('pair', value, targetid, main))
break
else:
for type in indextypes:
if entry.startswith(type+':'):
value = entry[len(type)+1:].strip()
if type == 'double':
type = 'pair'
indexentries.append((type, value, targetid, main))
break
# shorthand notation for single entries
else:
for value in oentry.split(','):
value = value.strip()
main = ''
if value.startswith('!'):
main = 'main'
value = value[1:].lstrip()
if not value:
continue
indexentries.append(('single', value, targetid, main))
return indexentries
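# Hedged usage sketch (illustrative helper and target id, not part of the
# original module): a '!' prefix marks the main entry, typed entries such as
# 'pair:' keep their type, and untyped entries become comma-separated singles.
def _process_index_entry_example():
    # -> [('pair', 'spam; eggs', 'index-0', 'main')]
    typed = process_index_entry('!pair: spam; eggs', 'index-0')
    # -> [('single', 'spam', 'index-0', ''), ('single', 'eggs', 'index-0', '')]
    shorthand = process_index_entry('spam, eggs', 'index-0')
    return typed, shorthand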
def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc):
"""Inline all toctrees in the *tree*.
Record all docnames in *docnameset*, and output docnames with *colorfunc*.
"""
tree = tree.deepcopy()
for toctreenode in tree.traverse(addnodes.toctree):
newnodes = []
includefiles = map(text_type, toctreenode['includefiles'])
for includefile in includefiles:
try:
builder.info(colorfunc(includefile) + " ", nonl=1)
subtree = inline_all_toctrees(builder, docnameset, includefile,
builder.env.get_doctree(includefile),
colorfunc)
docnameset.add(includefile)
except Exception:
builder.warn('toctree contains ref to nonexisting '
'file %r' % includefile,
builder.env.doc2path(docname))
else:
sof = addnodes.start_of_file(docname=includefile)
sof.children = subtree.children
for sectionnode in sof.traverse(nodes.section):
if 'docname' not in sectionnode:
sectionnode['docname'] = includefile
newnodes.append(sof)
toctreenode.parent.replace(toctreenode, newnodes)
return tree
def make_refnode(builder, fromdocname, todocname, targetid, child, title=None):
"""Shortcut to create a reference node."""
node = nodes.reference('', '', internal=True)
if fromdocname == todocname:
node['refid'] = targetid
else:
node['refuri'] = (builder.get_relative_uri(fromdocname, todocname) +
'#' + targetid)
if title:
node['reftitle'] = title
node.append(child)
return node
def set_source_info(directive, node):
node.source, node.line = \
directive.state_machine.get_source_and_line(directive.lineno)
def set_role_source_info(inliner, lineno, node):
node.source, node.line = inliner.reporter.get_source_and_line(lineno)
# monkey-patch Element.copy to copy the rawsource
def _new_copy(self):
return self.__class__(self.rawsource, **self.attributes)
nodes.Element.copy = _new_copy
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import json
import os
from pymatgen.electronic_structure.cohp import CompleteCohp, Cohp, IcohpValue, IcohpCollection
from pymatgen.electronic_structure.core import Spin, Orbital
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files", "cohp")
class CohpTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(test_dir, "cohp.json"), "r") as f:
self.cohp = Cohp.from_dict(json.load(f))
self.cohp_only = Cohp(self.cohp.efermi,
self.cohp.energies,
self.cohp.cohp)
with open(os.path.join(test_dir, "coop.json"), "r") as f:
self.coop = Cohp.from_dict(json.load(f))
def test_as_from_dict(self):
with open(os.path.join(test_dir, "cohp.json"), "r") as f:
cohp_dict = json.load(f)
self.assertEqual(self.cohp.as_dict(), cohp_dict)
def test_attributes(self):
self.assertEqual(len(self.cohp.energies), 301)
self.assertEqual(self.cohp.efermi, 9.75576)
self.assertEqual(self.coop.efermi, 5.90043)
self.assertFalse(self.cohp.are_coops)
self.assertTrue(self.coop.are_coops)
def test_get_icohp(self):
self.assertEqual(self.cohp.get_icohp(),
self.cohp.get_cohp(integrated=True))
self.assertEqual(None, self.cohp_only.get_icohp())
def test_get_interpolated_value(self):
# icohp_ef are the ICOHP(Ef) values taken from
# the ICOHPLIST.lobster file.
icohp_ef_dict = {Spin.up: -0.10218, Spin.down: -0.19701}
icoop_ef_dict = {Spin.up: 0.24714}
icohp_ef = self.cohp.get_interpolated_value(self.cohp.efermi,
integrated=True)
icoop_ef = self.coop.get_interpolated_value(self.coop.efermi,
integrated=True)
self.assertAlmostEqual(icohp_ef_dict, icohp_ef)
self.assertAlmostEqual(icoop_ef_dict, icoop_ef)
with self.assertRaises(ValueError):
self.cohp_only.get_interpolated_value(5.0, integrated=True)
def test_str(self):
with open(os.path.join(test_dir, "cohp.str"), "rt") as f:
str_cohp = f.read()
with open(os.path.join(test_dir, "coop.str"), "rt") as f:
str_coop = f.read()
self.assertEqual(self.cohp.__str__(), str_cohp)
self.assertEqual(self.coop.__str__(), str_coop)
def test_antibnd_states_below_efermi(self):
self.assertDictEqual(self.cohp.has_antibnd_states_below_efermi(spin=None), {Spin.up: True, Spin.down: True})
self.assertDictEqual(self.cohp.has_antibnd_states_below_efermi(spin=None, limit=0.5),
{Spin.up: False, Spin.down: False})
self.assertDictEqual(self.cohp.has_antibnd_states_below_efermi(spin=Spin.up, limit=0.5), {Spin.up: False})
class IcohpValueTest(unittest.TestCase):
def setUp(self):
# without spin polarization
label = "1"
atom1 = "K1"
atom2 = "F2"
length = "2.3"
translation = [-1, 0, 0]
num = 1
icohp = {Spin.up: -2.0}
are_coops = False
self.icohpvalue = IcohpValue(label=label, atom1=atom1, atom2=atom2, length=length, translation=translation,
num=num, icohp=icohp, are_coops=are_coops)
label_sp = "1"
atom1_sp = "K1"
atom2_sp = "F2"
length_sp = "2.3"
translation_sp = [-1, 0, 0]
num_sp = 1
icohp_sp = {Spin.up: -1.1, Spin.down: -1.0}
are_coops_sp = False
self.icohpvalue_sp = IcohpValue(label=label_sp, atom1=atom1_sp, atom2=atom2_sp, length=length_sp,
translation=translation_sp, num=num_sp, icohp=icohp_sp, are_coops=are_coops_sp)
def test_attributes(self):
# without spin polarization
self.assertEqual(self.icohpvalue_sp.num_bonds, 1)
self.assertEqual(self.icohpvalue_sp.are_coops, False)
self.assertEqual(self.icohpvalue_sp.is_spin_polarized, True)
self.assertDictEqual(self.icohpvalue.icohp, {Spin.up: -2.0})
# with spin polarization
self.assertEqual(self.icohpvalue_sp.num_bonds, 1)
self.assertEqual(self.icohpvalue_sp.are_coops, False)
self.assertEqual(self.icohpvalue_sp.is_spin_polarized, True)
self.assertDictEqual(self.icohpvalue_sp.icohp, {Spin.up: -1.1, Spin.down: -1.0})
def test_icohpvalue(self):
# without spin polarization
self.assertEqual(self.icohpvalue.icohpvalue(spin=Spin.up), -2.0)
# with spin polarization
self.assertEqual(self.icohpvalue_sp.icohpvalue(spin=Spin.up), -1.1)
self.assertEqual(self.icohpvalue_sp.icohpvalue(spin=Spin.down), -1.0)
def test_summed_icohp(self):
# without spin polarization
self.assertEqual(self.icohpvalue.summed_icohp, -2.0)
# with spin polarization
self.assertEqual(self.icohpvalue_sp.summed_icohp, -2.1)
class CombinedIcohpTest(unittest.TestCase):
def setUp(self):
# without spin polarization:
are_coops = False
is_spin_polarized = False
list_atom2 = ['K2', 'K2', 'K2', 'K2', 'K2', 'K2']
list_icohp = [{Spin.up: -0.40075}, {Spin.up: -0.40074}, {Spin.up: -0.40079}, {Spin.up: -0.40079},
{Spin.up: -0.40074}, {Spin.up: -0.40075}]
list_icoop = [{Spin.up: 0.02342}, {Spin.up: 0.02342}, {Spin.up: 0.02343}, {Spin.up: 0.02343},
{Spin.up: 0.02342}, {Spin.up: 0.02342}]
list_labels = ['1', '2', '3', '4', '5', '6']
list_length = [2.71199, 2.71199, 2.71199, 2.71199, 2.71199, 2.71199]
list_num = [1, 1, 1, 1, 1, 1]
list_atom1 = ['F1', 'F1', 'F1', 'F1', 'F1', 'F1']
list_translation = [[0, -1, -1], [-1, 0, -1], [0, 0, -1], [-1, -1, 0], [0, -1, 0], [-1, 0, 0]]
self.icohpcollection_KF = IcohpCollection(is_spin_polarized=is_spin_polarized, are_coops=are_coops,
list_labels=list_labels, list_atom1=list_atom1, list_atom2=list_atom2,
list_length=list_length, list_translation=list_translation,
list_num=list_num, list_icohp=list_icohp)
self.icoopcollection_KF = IcohpCollection(is_spin_polarized=is_spin_polarized, are_coops=True,
list_labels=list_labels, list_atom1=list_atom1, list_atom2=list_atom2,
list_length=list_length, list_translation=list_translation,
list_num=list_num, list_icohp=list_icoop)
# with spin polarization:
list_atom2_sp = ['Fe7', 'Fe9']
list_labels_sp = ['1', '2']
list_translation_sp = [[0, 0, 0], [0, 0, 0]]
list_length_sp = [2.83189, 2.45249]
list_atom1_sp = ['Fe8', 'Fe8']
is_spin_polarized_sp = True
are_coops_sp = False
list_num_sp = [2, 1]
list_icohp_sp = [{Spin.up: -0.10218, Spin.down: -0.19701}, {Spin.up: -0.28485, Spin.down: -0.58279}]
list_icoop_sp = [{Spin.up: -0.11389, Spin.down: -0.20828}, {Spin.up: -0.04087, Spin.down: -0.05756}]
self.icohpcollection_Fe = IcohpCollection(is_spin_polarized=is_spin_polarized_sp, are_coops=are_coops_sp,
list_labels=list_labels_sp, list_atom1=list_atom1_sp,
list_atom2=list_atom2_sp, list_length=list_length_sp,
list_translation=list_translation_sp, list_num=list_num_sp,
list_icohp=list_icohp_sp)
self.icoopcollection_Fe = IcohpCollection(is_spin_polarized=is_spin_polarized_sp, are_coops=True,
list_labels=list_labels_sp, list_atom1=list_atom1_sp,
list_atom2=list_atom2_sp, list_length=list_length_sp,
list_translation=list_translation_sp, list_num=list_num_sp,
list_icohp=list_icoop_sp)
def test_get_icohp_by_label(self):
# without spin polarization
# ICOHPs
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("1"), -0.40075)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("2"), -0.40074)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("3"), -0.40079)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("4"), -0.40079)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("5"), -0.40074)
self.assertEqual(self.icohpcollection_KF.get_icohp_by_label("6"), -0.40075)
# with spin polarization
# summed spin
# ICOHPs
self.assertEqual(self.icohpcollection_Fe.get_icohp_by_label("1"), -0.10218 - 0.19701)
self.assertEqual(self.icohpcollection_Fe.get_icohp_by_label("2"), -0.28485 - 0.58279)
# Spin up
# ICOHPs
self.assertEqual(self.icohpcollection_Fe.get_icohp_by_label("1", summed_spin_channels=False), -0.10218)
self.assertEqual(self.icohpcollection_Fe.get_icohp_by_label("2", summed_spin_channels=False), -0.28485)
# Spin down
# ICOHPs
self.assertEqual(self.icohpcollection_Fe.get_icohp_by_label("1", summed_spin_channels=False, spin=Spin.down),
-0.19701)
self.assertEqual(self.icohpcollection_Fe.get_icohp_by_label("2", summed_spin_channels=False, spin=Spin.down),
-0.58279)
def test_get_summed_icohp_by_label_list(self):
# without spin polarization
self.assertAlmostEqual(
self.icohpcollection_KF.get_summed_icohp_by_label_list(["1", "2", "3", "4", "5", "6"], divisor=6.0),
-0.40076)
# with spin polarization
sum1 = (-0.10218 - 0.19701 - 0.28485 - 0.58279) / 2.0
sum2 = (-0.10218 - 0.28485) / 2.0
sum3 = (-0.19701 - 0.58279) / 2.0
self.assertAlmostEqual(self.icohpcollection_Fe.get_summed_icohp_by_label_list(["1", "2"], divisor=2.0), sum1)
self.assertAlmostEqual(
self.icohpcollection_Fe.get_summed_icohp_by_label_list(["1", "2"], summed_spin_channels=False, divisor=2.0),
sum2)
self.assertAlmostEqual(
self.icohpcollection_Fe.get_summed_icohp_by_label_list(["1", "2"], summed_spin_channels=False,
spin=Spin.down, divisor=2.0), sum3)
def test_get_icohp_dict_by_bondlengths(self):
# without spin polarization
icohpvalue = {}
icohpvalue["1"] = {'@module': 'pymatgen.electronic_structure.cohp', 'num': 1, 'length': 2.71199,
'icohp': {Spin.up: -0.40075},
'are_coops': False, 'label': '1', 'atom2': 'K2', '@class': 'IcohpValue', 'atom1': 'F1',
'translation': [0, -1, -1]}
icohpvalue["2"] = {'@module': 'pymatgen.electronic_structure.cohp', 'num': 1, 'length': 2.71199,
'icohp': {Spin.up: -0.40074},
'are_coops': False, 'label': '2', 'atom2': 'K2', '@class': 'IcohpValue', 'atom1': 'F1',
'translation': [-1, 0, -1]}
icohpvalue["3"] = {'@module': 'pymatgen.electronic_structure.cohp', 'num': 1, 'length': 2.71199,
'icohp': {Spin.up: -0.40079},
'are_coops': False, 'label': '3', 'atom2': 'K2', '@class': 'IcohpValue', 'atom1': 'F1',
'translation': [0, 0, -1]}
icohpvalue["4"] = {'@module': 'pymatgen.electronic_structure.cohp', 'num': 1, 'length': 2.71199,
'icohp': {Spin.up: -0.40079},
'are_coops': False, 'label': '4', 'atom2': 'K2', '@class': 'IcohpValue', 'atom1': 'F1',
'translation': [-1, -1, 0]}
icohpvalue["5"] = {'@module': 'pymatgen.electronic_structure.cohp', 'num': 1, 'length': 2.71199,
'icohp': {Spin.up: -0.40074},
'are_coops': False, 'label': '5', 'atom2': 'K2', '@class': 'IcohpValue', 'atom1': 'F1',
'translation': [0, -1, 0]}
icohpvalue["6"] = {'@module': 'pymatgen.electronic_structure.cohp', 'num': 1, 'length': 2.71199,
'icohp': {Spin.up: -0.40075},
'are_coops': False, 'label': '6', 'atom2': 'K2', '@class': 'IcohpValue', 'atom1': 'F1',
'translation': [-1, 0, 0]}
dict_KF = self.icohpcollection_KF.get_icohp_dict_by_bondlengths(minbondlength=0.0, maxbondlength=8.0)
for key, value in sorted(dict_KF.items()):
v = value.as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohpvalue[key])
self.assertDictEqual({}, self.icohpcollection_KF.get_icohp_dict_by_bondlengths(minbondlength=0.0,
maxbondlength=1.0))
# with spin polarization
icohpvalue_spin = {}
icohpvalue_spin["1"] = {'num': 2, 'atom2': 'Fe7', 'translation': [0, 0, 0],
'@module': 'pymatgen.electronic_structure.cohp',
'are_coops': False, 'atom1': 'Fe8',
'label': '1', 'length': 2.83189, '@class': 'IcohpValue',
'icohp': {Spin.up: -0.10218, Spin.down: -0.19701}}
icohpvalue_spin["2"] = {'num': 1, 'atom2': 'Fe9', 'translation': [0, 0, 0],
'@module': 'pymatgen.electronic_structure.cohp',
'are_coops': False, 'atom1': 'Fe8',
'label': '2', 'length': 2.45249, '@class': 'IcohpValue',
'icohp': {Spin.up: -0.28485, Spin.down: -0.58279}}
dict_Fe = self.icohpcollection_Fe.get_icohp_dict_by_bondlengths(minbondlength=0.0, maxbondlength=8.0)
for key, value in sorted(dict_Fe.items()):
v = value.as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohpvalue_spin[key])
dict_Fe2 = self.icohpcollection_Fe.get_icohp_dict_by_bondlengths(minbondlength=2.5, maxbondlength=2.9)
self.assertEqual(len(dict_Fe2), 1)
for key, value in sorted(dict_Fe2.items()):
v = value.as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohpvalue_spin[key])
def test_get_icohp_dict_of_site(self):
# without spin polarization
icohpvalue = {}
icohpvalue["1"] = {'translation': [0, -1, -1], 'are_coops': False,
'@module': 'pymatgen.electronic_structure.cohp', 'length': 2.71199,
'atom2': 'K2',
'@class': 'IcohpValue', 'atom1': 'F1', 'num': 1, 'label': '1', 'icohp': {Spin.up: -0.40075}}
icohpvalue["2"] = {'translation': [-1, 0, -1], 'are_coops': False,
'@module': 'pymatgen.electronic_structure.cohp', 'length': 2.71199,
'atom2': 'K2',
'@class': 'IcohpValue', 'atom1': 'F1', 'num': 1, 'label': '2', 'icohp': {Spin.up: -0.40074}}
icohpvalue["3"] = {'translation': [0, 0, -1], 'are_coops': False,
'@module': 'pymatgen.electronic_structure.cohp', 'length': 2.71199,
'atom2': 'K2',
'@class': 'IcohpValue', 'atom1': 'F1', 'num': 1, 'label': '3', 'icohp': {Spin.up: -0.40079}}
icohpvalue["4"] = {'translation': [-1, -1, 0], 'are_coops': False,
'@module': 'pymatgen.electronic_structure.cohp', 'length': 2.71199,
'atom2': 'K2',
'@class': 'IcohpValue', 'atom1': 'F1', 'num': 1, 'label': '4', 'icohp': {Spin.up: -0.40079}}
icohpvalue["5"] = {'translation': [0, -1, 0], 'are_coops': False,
'@module': 'pymatgen.electronic_structure.cohp', 'length': 2.71199,
'atom2': 'K2',
'@class': 'IcohpValue', 'atom1': 'F1', 'num': 1, 'label': '5', 'icohp': {Spin.up: -0.40074}}
icohpvalue["6"] = {'translation': [-1, 0, 0], 'are_coops': False,
'@module': 'pymatgen.electronic_structure.cohp', 'length': 2.71199,
'atom2': 'K2',
'@class': 'IcohpValue', 'atom1': 'F1', 'num': 1, 'label': '6', 'icohp': {Spin.up: -0.40075}}
dict_KF = self.icohpcollection_KF.get_icohp_dict_of_site(site=0)
for key, value in sorted(dict_KF.items()):
v = value.as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohpvalue[key])
# compare the number of results depending on minsummedicohp, maxsummedicohp, minbondlength, maxbondlength, and only_bonds_to
dict_KF_2 = self.icohpcollection_KF.get_icohp_dict_of_site(site=0, minsummedicohp=None,
maxsummedicohp=-0.0, minbondlength=0.0,
maxbondlength=8.0)
dict_KF_3 = self.icohpcollection_KF.get_icohp_dict_of_site(site=0, minsummedicohp=None,
maxsummedicohp=-0.5, minbondlength=0.0,
maxbondlength=8.0)
dict_KF_4 = self.icohpcollection_KF.get_icohp_dict_of_site(site=0, minsummedicohp=0.0,
maxsummedicohp=None, minbondlength=0.0,
maxbondlength=8.0)
dict_KF_5 = self.icohpcollection_KF.get_icohp_dict_of_site(site=0, minsummedicohp=None,
maxsummedicohp=None, minbondlength=0.0,
maxbondlength=2.0)
dict_KF_6 = self.icohpcollection_KF.get_icohp_dict_of_site(site=0, minsummedicohp=None,
maxsummedicohp=None, minbondlength=3.0,
maxbondlength=8.0)
dict_KF_7 = self.icohpcollection_KF.get_icohp_dict_of_site(site=0, only_bonds_to=['K'])
dict_KF_8 = self.icohpcollection_KF.get_icohp_dict_of_site(site=1, only_bonds_to=['K'])
dict_KF_9 = self.icohpcollection_KF.get_icohp_dict_of_site(site=1, only_bonds_to=['F'])
self.assertEqual(len(dict_KF_2), 6)
self.assertEqual(len(dict_KF_3), 0)
self.assertEqual(len(dict_KF_4), 0)
self.assertEqual(len(dict_KF_5), 0)
self.assertEqual(len(dict_KF_6), 0)
self.assertEqual(len(dict_KF_7), 6)
self.assertEqual(len(dict_KF_8), 0)
self.assertEqual(len(dict_KF_9), 6)
# spin polarization
dict_Fe = self.icohpcollection_Fe.get_icohp_dict_of_site(site=0)
self.assertEqual(len(dict_Fe), 0)
# Fe8
dict_Fe2 = self.icohpcollection_Fe.get_icohp_dict_of_site(site=7)
self.assertEqual(len(dict_Fe2), 2)
# Test the values
icohplist_Fe = {}
icohplist_Fe["1"] = {'are_coops': False, 'translation': [0, 0, 0],
'icohp': {Spin.down: -0.19701, Spin.up: -0.10218}, 'length': 2.83189,
'@module': 'pymatgen.electronic_structure.cohp', 'atom1': 'Fe8', 'atom2': 'Fe7',
'label': '1',
'@class': 'IcohpValue', 'num': 2}
icohplist_Fe["2"] = {'are_coops': False, 'translation': [0, 0, 0],
'icohp': {Spin.down: -0.58279, Spin.up: -0.28485}, 'length': 2.45249,
'@module': 'pymatgen.electronic_structure.cohp', 'atom1': 'Fe8', 'atom2': 'Fe9',
'label': '2',
'@class': 'IcohpValue', 'num': 1}
for key, value in sorted(dict_Fe2.items()):
v = value.as_dict()
if "@version" in v:
v.pop("@version")
self.assertEqual(v, icohplist_Fe[key])
# Fe9
dict_Fe3 = self.icohpcollection_Fe.get_icohp_dict_of_site(site=8)
self.assertEqual(len(dict_Fe3), 1)
# compare the number of results depending on minsummedicohp, maxsummedicohp, minbondlength, maxbondlength
# Fe8
dict_Fe4 = self.icohpcollection_Fe.get_icohp_dict_of_site(site=7, minsummedicohp=-0.3,
maxsummedicohp=None, minbondlength=0.0,
maxbondlength=8.0)
self.assertEqual(len(dict_Fe4), 1)
values = []
for key, value in dict_Fe4.items():
values.append(value)
v = values[0].as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohplist_Fe["1"])
dict_Fe5 = self.icohpcollection_Fe.get_icohp_dict_of_site(site=7, minsummedicohp=None,
maxsummedicohp=-0.3, minbondlength=0.0,
maxbondlength=8.0)
self.assertEqual(len(dict_Fe5), 1)
values = []
for key, value in dict_Fe5.items():
values.append(value)
v = values[0].as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohplist_Fe["2"])
dict_Fe6 = self.icohpcollection_Fe.get_icohp_dict_of_site(site=7, minsummedicohp=None,
maxsummedicohp=None, minbondlength=0.0,
maxbondlength=2.5)
self.assertEqual(len(dict_Fe6), 1)
values = []
for key, value in dict_Fe6.items():
values.append(value)
v = values[0].as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohplist_Fe["2"])
dict_Fe7 = self.icohpcollection_Fe.get_icohp_dict_of_site(site=7, minsummedicohp=None,
maxsummedicohp=None, minbondlength=2.5,
maxbondlength=8.0)
self.assertEqual(len(dict_Fe7), 1)
values = []
for key, value in dict_Fe7.items():
values.append(value)
v = values[0].as_dict()
if "@version" in v:
v.pop("@version")
self.assertDictEqual(v, icohplist_Fe["1"])
def test_extremum_icohpvalue(self):
# without spin polarization
# ICOHPs
self.assertEqual(self.icohpcollection_KF.extremum_icohpvalue(), -0.40079)
# ICOOPs
self.assertEqual(self.icoopcollection_KF.extremum_icohpvalue(), 0.02343)
# with spin polarization
# summed spin
# ICOHPs
self.assertEqual(self.icohpcollection_Fe.extremum_icohpvalue(), -0.86764)
self.assertAlmostEqual(self.icoopcollection_Fe.extremum_icohpvalue(), -0.09842999999999999)
# ICOOPs
# spin up
# ICOHPs
self.assertEqual(self.icohpcollection_Fe.extremum_icohpvalue(summed_spin_channels=False), -0.28485)
# ICOOPs
self.assertEqual(self.icoopcollection_Fe.extremum_icohpvalue(summed_spin_channels=False), -0.04087)
# spin down
# ICOHPs
self.assertEqual(self.icohpcollection_Fe.extremum_icohpvalue(summed_spin_channels=False, spin=Spin.down),
-0.58279)
# ICOOPs
self.assertEqual(self.icoopcollection_Fe.extremum_icohpvalue(summed_spin_channels=False, spin=Spin.down),
-0.05756)
class CompleteCohpTest(PymatgenTest):
def setUp(self):
filepath = os.path.join(test_dir, "complete_cohp_lobster.json")
with open(filepath, "r") as f:
self.cohp_lobster_dict = CompleteCohp.from_dict(json.load(f))
filepath = os.path.join(test_dir, "complete_coop_lobster.json")
with open(filepath, "r") as f:
self.coop_lobster_dict = CompleteCohp.from_dict(json.load(f))
filepath = os.path.join(test_dir, "complete_cohp_lmto.json")
with open(filepath, "r") as f:
self.cohp_lmto_dict = CompleteCohp.from_dict(json.load(f))
filepath = os.path.join(test_dir, "complete_cohp_orbitalwise.json")
with open(filepath, "r") as f:
self.cohp_orb_dict = CompleteCohp.from_dict(json.load(f))
# Lobster 3.0
filepath = os.path.join(test_dir, "complete_cohp_forb.json")
with open(filepath, "r") as f:
self.cohp_lobster_forb_dict = CompleteCohp.from_dict(json.load(f))
# Lobster 2.0
filepath = os.path.join(test_dir, "COPL.BiSe")
structure = os.path.join(test_dir, "CTRL.BiSe")
self.cohp_lmto = CompleteCohp.from_file("lmto", filename=filepath,
structure_file=structure)
filepath = os.path.join(test_dir, "COHPCAR.lobster")
structure = os.path.join(test_dir, "POSCAR")
self.cohp_lobster = CompleteCohp.from_file("lobster",
filename=filepath,
structure_file=structure)
filepath = os.path.join(test_dir, "COOPCAR.lobster.BiSe")
structure = os.path.join(test_dir, "POSCAR.BiSe")
self.coop_lobster = CompleteCohp.from_file("lobster",
filename=filepath,
structure_file=structure,
are_coops=True)
filepath = os.path.join(test_dir, "COHPCAR.lobster.orbitalwise")
structure = os.path.join(test_dir, "POSCAR.orbitalwise")
self.cohp_orb = CompleteCohp.from_file("lobster",
filename=filepath,
structure_file=structure)
filepath = os.path.join(test_dir, "COHPCAR.lobster.notot.orbitalwise")
self.cohp_notot = CompleteCohp.from_file("lobster",
filename=filepath,
structure_file=structure)
# Lobster 3.0
filepath = os.path.join(test_dir, "COHPCAR.lobster.Na2UO4")
structure = os.path.join(test_dir, "POSCAR.Na2UO4")
self.cohp_lobster_forb = CompleteCohp.from_file("lobster", filename=filepath, structure_file=structure)
def test_attributes(self):
self.assertFalse(self.cohp_lobster.are_coops)
self.assertFalse(self.cohp_lobster_dict.are_coops)
self.assertFalse(self.cohp_lmto.are_coops)
self.assertFalse(self.cohp_lmto_dict.are_coops)
self.assertTrue(self.coop_lobster.are_coops)
self.assertTrue(self.coop_lobster_dict.are_coops)
self.assertFalse(self.cohp_lobster_forb.are_coops)
self.assertFalse(self.cohp_lobster_forb_dict.are_coops)
self.assertEqual(len(self.cohp_lobster.energies), 301)
self.assertEqual(len(self.cohp_lmto.energies), 801)
self.assertEqual(len(self.coop_lobster.energies), 241)
self.assertEqual(len(self.cohp_lobster_forb.energies), 7)
self.assertEqual(self.cohp_lobster.efermi, 9.75576)
self.assertEqual(self.cohp_lmto.efermi, -2.3433)
self.assertEqual(self.coop_lobster.efermi, 5.90043)
self.assertEqual(self.cohp_lobster_forb.efermi, 4.12875)
def test_dict(self):
# The json files are dict representations of the COHPs from the LMTO
# and LOBSTER calculations and should thus be the same.
self.assertEqual(self.cohp_lobster.as_dict(),
self.cohp_lobster_dict.as_dict())
self.assertEqual(self.cohp_orb.as_dict(),
self.cohp_orb_dict.as_dict())
# Lobster 3.0, including f orbitals
self.assertEqual(self.cohp_lobster_forb.as_dict(),
self.cohp_lobster_forb_dict.as_dict())
# Testing the LMTO dicts will be more involved. Since the average
# is calculated and not read, there may be differences in rounding
# with a very small number of matrix elements, which would cause the
# test to fail
for key in ["COHP", "ICOHP"]:
self.assertArrayAlmostEqual(
self.cohp_lmto.as_dict()[key]["average"]["1"],
self.cohp_lmto_dict.as_dict()[key]["average"]["1"], 5)
for key in self.cohp_lmto.as_dict():
if key not in ["COHP", "ICOHP"]:
self.assertEqual(self.cohp_lmto.as_dict()[key],
self.cohp_lmto_dict.as_dict()[key])
else:
for bond in self.cohp_lmto.as_dict()[key]:
if bond != "average":
self.assertEqual(self.cohp_lmto.as_dict()[key][bond],
self.cohp_lmto_dict.as_dict()[key][bond])
def test_icohp_values(self):
# icohp_ef are the ICOHP(Ef) values taken from
# the ICOHPLIST.lobster file.
icohp_ef_dict = {"1": {Spin.up: -0.10218, Spin.down: -0.19701},
"2": {Spin.up: -0.28485, Spin.down: -0.58279}}
all_cohps_lobster = self.cohp_lobster.all_cohps
for bond in icohp_ef_dict:
icohp_ef = all_cohps_lobster[bond].get_interpolated_value(
self.cohp_lobster.efermi, integrated=True)
self.assertEqual(icohp_ef_dict[bond], icohp_ef)
icoop_ef_dict = {"1": {Spin.up: 0.14245},
"2": {Spin.up: -0.04118},
"3": {Spin.up: 0.14245},
"4": {Spin.up: -0.04118},
"5": {Spin.up: -0.03516},
"6": {Spin.up: 0.10745},
"7": {Spin.up: -0.03516},
"8": {Spin.up: 0.10745},
"9": {Spin.up: -0.12395},
"10": {Spin.up: 0.24714},
"11": {Spin.up: -0.12395}}
all_coops_lobster = self.coop_lobster.all_cohps
for bond in icoop_ef_dict:
icoop_ef = all_coops_lobster[bond].get_interpolated_value(
self.coop_lobster.efermi, integrated=True)
self.assertEqual(icoop_ef_dict[bond], icoop_ef)
def test_get_cohp_by_label(self):
self.assertEqual(self.cohp_orb.get_cohp_by_label("1").energies[0], -11.7225)
self.assertEqual(self.cohp_orb.get_cohp_by_label("1").energies[5], -11.47187)
self.assertFalse(self.cohp_orb.get_cohp_by_label("1").are_coops)
self.assertEqual(self.cohp_orb.get_cohp_by_label("1").cohp[Spin.up][0], 0.0)
self.assertEqual(self.cohp_orb.get_cohp_by_label("1").cohp[Spin.up][300], 0.03392)
self.assertEqual(self.cohp_orb.get_cohp_by_label("average").cohp[Spin.up][230], -0.08792)
self.assertEqual(self.cohp_orb.get_cohp_by_label("average").energies[230], -0.19368000000000007)
self.assertFalse(self.cohp_orb.get_cohp_by_label("average").are_coops)
# test methods from super class that could be overwritten
self.assertEqual(self.cohp_orb.get_icohp()[Spin.up][3], 0.0)
self.assertEqual(self.cohp_orb.get_cohp()[Spin.up][3], 0.0)
def test_get_summed_cohp_by_label_list(self):
self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1"]).energies[0], -11.7225)
self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"]).energies[0], -11.7225)
self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1"]).energies[5], -11.47187)
self.assertFalse(self.cohp_orb.get_summed_cohp_by_label_list(["1"]).are_coops)
self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1"]).cohp[Spin.up][0], 0.0)
self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"]).cohp[Spin.up][0], 0.0)
self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"]).cohp[Spin.up][300], 0.03392 * 2.0)
self.assertEqual(self.cohp_orb.get_summed_cohp_by_label_list(["1", "1"], divisor=2).cohp[Spin.up][300], 0.03392)
def test_get_summed_cohp_by_label_and_orbital_list(self):
ref = self.cohp_orb.orb_res_cohp["1"]["4s-4px"]
ref2 = self.cohp_orb.orb_res_cohp["1"]["4px-4pz"]
cohp_label = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1"], ["4s-4px"])
cohp_label2 = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1", "1"], ["4s-4px", "4s-4px"])
cohp_label2x = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1", "1"], ["4s-4px", "4s-4px"],
divisor=2)
cohp_label3 = self.cohp_orb.get_summed_cohp_by_label_and_orbital_list(["1", "1"], ["4px-4pz", "4s-4px"])
self.assertArrayEqual(cohp_label.cohp[Spin.up], ref["COHP"][Spin.up])
self.assertArrayEqual(cohp_label2.cohp[Spin.up], ref["COHP"][Spin.up] * 2.0)
self.assertArrayEqual(cohp_label3.cohp[Spin.up], ref["COHP"][Spin.up] + ref2["COHP"][Spin.up])
self.assertArrayEqual(cohp_label.icohp[Spin.up], ref["ICOHP"][Spin.up])
self.assertArrayEqual(cohp_label2.icohp[Spin.up], ref["ICOHP"][Spin.up] * 2.0)
self.assertArrayEqual(cohp_label2x.icohp[Spin.up], ref["ICOHP"][Spin.up])
self.assertArrayEqual(cohp_label3.icohp[Spin.up], ref["ICOHP"][Spin.up] + ref2["ICOHP"][Spin.up])
def test_orbital_resolved_cohp(self):
# When read from a COHPCAR file, total COHPs are calculated from
# the orbital-resolved COHPs if the total is missing. This may be
# case for LOBSTER version 2.2.0 and earlier due to a bug with the
# cohpgenerator keyword. The calculated total should be approximately
# the total COHP calculated by LOBSTER. Due to numerical errors in
# the LOBSTER calculation, the precision is not very high though.
self.assertArrayAlmostEqual(
self.cohp_orb.all_cohps["1"].cohp[Spin.up],
self.cohp_notot.all_cohps["1"].cohp[Spin.up], decimal=3)
self.assertArrayAlmostEqual(
self.cohp_orb.all_cohps["1"].icohp[Spin.up],
self.cohp_notot.all_cohps["1"].icohp[Spin.up], decimal=3)
# Tests different methods for getting orbital-resolved COHPs
ref = self.cohp_orb.orb_res_cohp["1"]["4s-4px"]
cohp_label = self.cohp_orb.get_orbital_resolved_cohp("1",
"4s-4px")
self.assertEqual(cohp_label.cohp, ref["COHP"])
self.assertEqual(cohp_label.icohp, ref["ICOHP"])
orbitals = [[Orbital.s, Orbital.px], ["s", "px"], [0, 3]]
cohps = [self.cohp_orb.get_orbital_resolved_cohp("1",
[[4, orb[0]], [4, orb[1]]]) for orb in orbitals]
# print(cohps)
for cohp in cohps:
self.assertEqual(cohp.as_dict(), cohp_label.as_dict())
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
#################################################################################
# # #
# GraphCSV converts CSV input files into a HTML file containing all CSS and #
# JavaScript inline. #
# #
# @author Shane Brennan #
# @date 20160716 #
#################################################################################
import csv
import math
import sys
import os
import datetime
import time
import re
import json
import collections
import logging
import random
from sets import Set
from dateutil.relativedelta import relativedelta
from optparse import OptionParser
from logging.handlers import RotatingFileHandler
class GraphCSV:
def __init__(self, ignoreHeaderFlag, lineChartFlag, inputFilename):
# Set the flags to generate the chart options
self.ignoreHeaderFlag = ignoreHeaderFlag
# Instantiate the chart object
self.chart = Chart(lineChartFlag)
# Parse the CSV input and update the chart
self.processCSV(inputFilename)
# Output the NVD3 chart as HTML
html = self.chart.generateHTML()
print html
def processCSV(self, inputFilename):
""" Process the CSV input and convert the input data into
a valid NVD3 chart.
"""
inputFile = open(inputFilename, 'r')
inputCSV = csv.reader(inputFile)
try:
index = 0
for row in inputCSV:
if len(row) != 3:
raise InputException('Input not in three-column CSV format')
if self.ignoreHeaderFlag:
if index > 0:
category = row[0]
xPos = row[1]
yPos = row[2]
self.chart.addElement(category, xPos, yPos)
else:
category = row[0]
xPos = row[1]
yPos = row[2]
self.chart.addElement(category, xPos, yPos)
index += 1
except InputException, err:
print 'Error: ', err.msg
sys.exit(1)
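# Hedged example of the three-column input processCSV() expects (illustrative
# data, not shipped with the script); with the --ignore flag the first row is
# treated as a header:
#
#   region,date,value
#   north,2016-07-01,10.5
#   north,2016-07-02,11.0
#   south,2016-07-01,7.25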
class Chart:
def __init__(self, lineChartFlag):
# Assign a random name to the chart
self.chartName = 'chart_' + ''.join(random.choice('1234567890') for i in range(6))
self.xFormat = None
self.lineChartFlag = lineChartFlag
# Setup the chart elements and categories
self.categoryList = []
self.categoryNames = Set()
def addElement(self, categoryName, xPos, yPos):
""" Adds an element to the category and elements list.
"""
if self.xFormat is None:
self.xFormat = self.getFormat(xPos)
if categoryName in self.categoryNames:
index = self.getIndex(categoryName)
category = self.categoryList[index]
category.add(categoryName, xPos, yPos)
self.categoryList[index] = category
else:
category = Category(categoryName, xPos, yPos)
self.categoryList.append(category)
self.categoryNames.add(categoryName)
def getIndex(self, categoryName):
""" Returns the index of a category matching the provided name.
"""
for index, category in enumerate(self.categoryList):
if category.getName() == categoryName:
return index
return -1
def getFormat(self, value):
date1 = re.compile('[0-9]{4}[-/]{1}[0-9]{2}[-/]{1}[0-9]{2}')
date2 = re.compile('[0-9]{2}[-/]{1}[0-9]{2}[-/]{1}[0-9]{4}')
date3 = re.compile('20[0-9]{1}[0-9]{1}[0-1]{1}[0-9]{1}[0-3]{1}[0-9]{1}')
float1 = re.compile(r'[0-9]+\.[0-9]+')  # escape the dot so plain integers are not treated as floats
if date1.match(value):
return '%Y-%m-%d'
elif date2.match(value):
return '%d-%m-%Y'
elif date3.match(value):
return '%Y%m%d'
elif float1.match(value):
return '%.2f'
else:
return '%.0f'
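# Hedged sketch of the detection above (illustrative sample values only):
#   '2016-07-16' -> '%Y-%m-%d'
#   '16-07-2016' -> '%d-%m-%Y'
#   '20160716'   -> '%Y%m%d'
#   '3.14'       -> '%.2f'
#   '42'         -> '%.0f'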
def generateHTML(self):
""" Generate HTML from the provided CSV.
"""
html = "<!DOCTYPE html>\n"
html += "<html lang=\"en\">\n"
html += "<!--- HEAD ---->\n"
html += "<head>\n"
html += " <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\"/>\n"
html += " <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, maximum-scale=1.0\"/>\n"
html += " <title>Grapher v1.0 Dashboard</title>\n"
html += " <link href=\"https://fonts.googleapis.com/icon?family=Material+Icons\" rel=\"stylesheet\">\n"
html += " <link href=\"http://materializecss.com/css/ghpages-materialize.css\" type=\"text/css\" rel=\"stylesheet\" media=\"screen,projection\"/ >\n"
html += " <link href=\"http://nvd3.org/assets/css/nv.d3.css\" rel=\"stylesheet\" type=\"text/css\">\n"
html += " <script src=\"http://nvd3.org/assets/lib/d3.v3.js\" charset=\"utf-8\"></script>\n"
html += " <script src=\"http://nvd3.org/assets/js/nv.d3.js\"></script>\n"
html += " <script src=\"http://nvd3.org/assets/js/data/stream_layers.js\"></script>\n"
html += "</head>\n"
html += "<!--- BODY ---->\n"
html += "<body>\n\n"
html += '<div id="{0}" style="height: 300px;"><svg></svg></div>\n'.format(self.chartName)
html += "<!-- Script of Priority Tasks -->\n"
html += "<script>\n"
if self.lineChartFlag:
html += "<script>\n"
html += "\tvar chart = nv.models.stackedAreaChart()\n"
html += "\t\t.margin({right: 100})\n"
html += "\t\t.x(function(d) { return parseDate(d.x); } )\n"
html += "\t\t.useInteractiveGuideline(true)\n"
html += "\t\t.rightAlignYAxis(true)\n"
html += "\t\t.transitionDuration(500)\n"
html += "\t\t.showControls(true)\n"
html += "\t\t.clipEdge(true);\n"
if '%Y' in self.xFormat:
html += "\tvar parseDate = d3.time.format(\"{0}\").parse;\n".format(self.xFormat)
html += "\tchart.xAxis.tickFormat(function(d) {\n"
html += "\treturn d3.time.format('{0}')(new Date(d))\n".format(self.xFormat)
html += "\t});\n"  # close the tickFormat callback opened in the date branch
else:
html += "\tchart.xAxis.tickFormat(d3.format(',.2f'));\n"
html += "\tchart.yAxis.tickFormat(d3.format(',.2f'));\n"
else:
html += "\tvar chart = nv.models.multiBarChart()\n"
html += "\t\t.reduceXTicks(false)\n"
html += "\t\t.rotateLabels(30)\n"
html += "\t\t.showControls(false)\n"
html += "\t\t.stacked(true)\n"
html += "\t\t.groupSpacing(0.1);\n\n"
html += "d3.select('#{0} svg').datum([\n".format(self.chartName)
for categoryName in self.categoryNames:
index = self.getIndex(categoryName)
category = self.categoryList[index]
jsonOutput = category.getJSON()
jsonOutput = jsonOutput.replace('\n','')
jsonOutput = re.sub(r'\s+', ' ', jsonOutput)
jsonOutput = re.sub(r'"key"', '\n\tkey', jsonOutput)
jsonOutput = re.sub(r'"color"', '\n\tcolor', jsonOutput)
jsonOutput = re.sub(r'"values"', '\n\tvalues', jsonOutput)
jsonOutput = re.sub(r'{ "x"', '\n\t{ x', jsonOutput)
jsonOutput = re.sub(r'} ]}', '}\n\t]\n}', jsonOutput)
html += jsonOutput
html += ',\n'
html = html[:-2]
html += '\n'
html += "]).transition().duration(500).call(chart);\n"
html += "</script>\n"
html += "</body>\n"
html += "</html>\n"
# Now clean up some of the formatting
html = html.replace('"x"','x')
html = html.replace('"y"','y')
html = html.replace('"key"','key')
html = html.replace('"color"','color')
html = html.replace('"values"','values')
return html
class Category:
def __init__(self, categoryName, xPos, yPos):
self.name = categoryName
self.categoryDict = collections.OrderedDict()
self.categoryDict['key'] = categoryName
self.categoryDict['color'] = "#%06x" % random.randint(0, 0xFFFFFF)
coordDict = collections.OrderedDict()
coordDict['x'] = xPos
coordDict['y'] = float(yPos)
valuesList = [ coordDict ]
self.categoryDict['values'] = valuesList
def add(self, name, xPos, yPos):
if self.name == name:
coordDict = collections.OrderedDict()
coordDict['x'] = xPos
coordDict['y'] = float(yPos)
valuesList = self.categoryDict['values']
valuesList.append(coordDict)
self.categoryDict['values'] = valuesList
else:
print 'UNMATCHED', self.categoryDict['key'], name
exit(1)
def getName(self):
return self.categoryDict['key']
def getColour(self):
return self.categoryDict['color']
def getJSON(self):
jsonValue = json.dumps(self.categoryDict, indent=4)
return jsonValue
class InputException(Exception):
""" Define a custom exception for non-standard CSV inputs.
"""
def __init__(self, arg):
self.msg = arg
def main(argv):
parser = OptionParser(usage="Usage: Grapher <filename>")
parser.add_option("-i", "--ignore",
action="store_true",
dest="ignoreHeaderFlag",
default=False,
help="Ignore the header in the CSV")
parser.add_option("-l", "--linechart",
action="store_true",
dest="lineChartFlag",
default=False,
help="Create a line chart.")
(options, filename) = parser.parse_args()
if len(filename) != 1:
parser.print_help()
exit(1)
elif not os.path.isfile(filename[0]):
parser.print_help()
print "Input file does not exist"
exit(1)
check = GraphCSV(options.ignoreHeaderFlag, options.lineChartFlag, filename[0])
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adagrad_update_numpy(param, accum, g_t, lr=0.001, epsilon=1e-7):
accum_t = accum + g_t * g_t
param_t = param - lr * g_t / (np.sqrt(accum_t) + epsilon)
return param_t, accum_t
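# Hedged worked example of a single dense step (values mirror the first test
# case below; decimals are approximate):
#   accum_t = 0.1 + 0.1 * 0.1                      = 0.11
#   param_t = 1.0 - 3.0 * 0.1 / (sqrt(0.11) + 1e-7) ~= 0.0955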
def sparse_adagrad_update_numpy(param,
accum,
gindexs,
gvalues,
lr=0.001,
epsilon=1e-7):
accum_t = copy.deepcopy(accum)
param_t = copy.deepcopy(param)
# first loop accumulates repeated indices if necessary.
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
accum_t[gindex] = accum_t[gindex] + gvalue * gvalue
for i in range(len(gindexs)):
gindex = gindexs[i]
gvalue = gvalues[i]
param_t[gindex] = param_t[gindex] - lr * gvalue / (
np.sqrt(accum_t[gindex]) + epsilon)
return param_t, accum_t
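# Hedged note on the two-pass structure above: repeated indices must be fully
# accumulated before any parameter update, so a sparse gradient with indices
# [1, 1] and values [0.1, 0.1] behaves like a single aggregated value of 0.2
# (the property exercised by testSparseRepeatedIndices below).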
class AdagradOptimizerTest(test.TestCase):
def doTestBasic(self, use_callable_params=False):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = lambda: 3.0
if not use_callable_params:
learning_rate = learning_rate()
ada_opt = adagrad.Adagrad(learning_rate)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, 3.0)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, 3.0)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testBasic(self):
self.doTestBasic()
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_callable_params=True)
def testBasicWithLearningRateDecay(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
decay = 0.5
ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for t in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr_np = learning_rate / (1 + decay * t)
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, lr_np)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, lr_np)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLargeEpsilon(self):
with self.cached_session():
var0_np = np.array([1.0, 2.0])
var1_np = np.array([3.0, 4.0])
grads0_np = np.array([0.1, 0.1])
grads1_np = np.array([0.01, 0.01])
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.0)
accum0_np = np.array([0.1, 0.1])
accum1_np = np.array([0.1, 0.1])
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for _ in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np, grads0_np,
3.0, 1.0)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np, grads1_np,
3.0, 1.0)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testBasicWithLearningRateInverseTimeDecay(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
decay = 0.5
lr_schedule = learning_rate_schedule.InverseTimeDecay(
learning_rate, decay_steps=1.0, decay_rate=decay)
ada_opt = adagrad.Adagrad(lr_schedule)
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
if not context.executing_eagerly():
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([1.0, 2.0], v0_val)
self.assertAllClose([3.0, 4.0], v1_val)
# Run 3 steps of adagrad
for t in range(3):
if not context.executing_eagerly():
self.evaluate(ada_update)
else:
ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
lr_np = learning_rate / (1 + decay * t)
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, lr_np)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, lr_np)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0 = resource_variable_ops.ResourceVariable(
[[1.0, 2.0], [3.0, 4.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0])
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType(
[[1.0, 2.0], [3.0, 4.0]], var0.eval())
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0, 1], [3, 4]], var0.eval(), atol=0.01)
@test_util.run_deprecated_v1
def testTensorLearningRate(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = constant_op.constant(3.0)
ada_opt = adagrad.Adagrad(learning_rate)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, learning_rate)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, learning_rate)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseBasic(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0_np_indices = np.array([0, 2], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
grads1_np_indices = np.array([0, 2], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np[grads1_np_indices]),
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 3.0, 4.0], var1.eval())
accum0_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1, 0.1], dtype=dtype.as_numpy_dtype)
# Run 3 step of sgd
for _ in range(3):
ada_update.run()
var0_np, accum0_np = sparse_adagrad_update_numpy(
var0_np, accum0_np, grads0_np_indices,
grads0_np[grads0_np_indices], learning_rate)
var1_np, accum1_np = sparse_adagrad_update_numpy(
var1_np, accum1_np, grads1_np_indices,
grads1_np[grads1_np_indices], learning_rate)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
@test_util.run_deprecated_v1
def testSparseSingleVarDim(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
grads0_np_indices = np.array([0], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np[grads0_np_indices]),
constant_op.constant(grads0_np_indices), constant_op.constant([3]))
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate, epsilon=1.)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0], var0.eval())
accum0_np = np.array([0.1], dtype=dtype.as_numpy_dtype)
# Run 3 step of sgd
for _ in range(3):
ada_update.run()
var0_np, accum0_np = sparse_adagrad_update_numpy(
var0_np,
accum0_np,
grads0_np_indices,
grads0_np[grads0_np_indices],
learning_rate,
epsilon=1.)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
@test_util.run_deprecated_v1
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_np = np.array([[1.0], [2.0]], dtype=dtype.as_numpy_dtype)
repeated_index_update_var = resource_variable_ops.ResourceVariable(
var_np, dtype=dtype)
aggregated_update_var = resource_variable_ops.ResourceVariable(
var_np, dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update = adagrad.Adagrad(3.0).apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adagrad.Adagrad(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
@test_util.run_deprecated_v1
def testSparseRepeatedIndicesByEmbeddingLookUp(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var_repeated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_repeated = lambda: math_ops.reduce_sum( # pylint: disable=g-long-lambda
embedding_ops.embedding_lookup(var_repeated, [0, 0])) # pylint: disable=cell-var-from-loop
var_aggregated = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype)
loss_aggregated = lambda: 2 * math_ops.reduce_sum( # pylint: disable=g-long-lambda
embedding_ops.embedding_lookup(var_aggregated, [0])) # pylint: disable=cell-var-from-loop
update_op_repeated = adagrad.Adagrad(2.0).minimize(
loss_repeated, var_list=[var_repeated])
update_op_aggregated = adagrad.Adagrad(2.0).minimize(
loss_aggregated, var_list=[var_aggregated])
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
for _ in range(3):
update_op_repeated.run()
update_op_aggregated.run()
self.assertAllCloseAccordingToType(
var_repeated.eval(), var_aggregated.eval())
@test_util.run_deprecated_v1
def testSparseStability(self):
for dtype in [dtypes.half]:
with self.cached_session():
shape = [1, 6]
var0_np = np.array([[
0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257, -0.0105945
]],
dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
grads0_np = np.array([[
-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05, -8.4877e-05,
-9.48906e-05
]],
dtype=dtype.as_numpy_dtype)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np), constant_op.constant([0]),
constant_op.constant(shape))
ada_opt = adagrad.Adagrad(1.0)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
slot0 = ada_opt.get_slot(var0, "accumulator")
init = variables.global_variables_initializer()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([[
0.00891194, -0.10712013, 0.11047515, 0.22636929, -0.0144573,
-0.01029443
]]), var0.eval())
@test_util.run_deprecated_v1
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.cached_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = resource_variable_ops.ResourceVariable(var0_np)
var1 = resource_variable_ops.ResourceVariable(var1_np)
grads0 = constant_op.constant(grads0_np)
grads1 = constant_op.constant(grads1_np)
learning_rate = 3.0
ada_opt = adagrad.Adagrad(learning_rate)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.shape, var0.shape)
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.shape, var1.shape)
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
accum0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
for _ in range(3):
var0_np, accum0_np = adagrad_update_numpy(var0_np, accum0_np,
grads0_np, learning_rate)
var1_np, accum1_np = adagrad_update_numpy(var1_np, accum1_np,
grads1_np, learning_rate)
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testConstructAdagradWithLR(self):
opt = adagrad.Adagrad(lr=1.0)
opt_2 = adagrad.Adagrad(learning_rate=0.1, lr=1.0)
opt_3 = adagrad.Adagrad(learning_rate=0.1)
self.assertIsInstance(opt.lr, variables.Variable)
self.assertIsInstance(opt_2.lr, variables.Variable)
self.assertIsInstance(opt_3.lr, variables.Variable)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(opt.lr), (1.0))
self.assertAllClose(self.evaluate(opt_2.lr), (1.0))
self.assertAllClose(self.evaluate(opt_3.lr), (0.1))
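# Reference sketch (not part of the original tests): a restatement of the
# Adagrad update rule that the adagrad_update_numpy / sparse_adagrad_update_numpy
# helpers defined earlier in this file are expected to implement. The helper
# name below and the 1e-7 default epsilon are illustrative assumptions.
def _adagrad_reference_step(var, accum, g, lr, epsilon=1e-7):
  """Dense Adagrad step: accumulate squared gradients, then scale the update."""
  accum_t = accum + g * g
  var_t = var - lr * g / (np.sqrt(accum_t) + epsilon)
  return var_t, accum_t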
if __name__ == "__main__":
test.main()
|
|
import time
import subprocess
import logging
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.dcemulator.resourcemodel.upb.simple import UpbSimpleCloudDcRM
from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
import os
import sys
import thread
from mininet.node import RemoteController
from mininet.node import DefaultController
from mininet.clean import cleanup
def prepareDC():
""" Prepares physical topology to place chains. """
    # We use the Sonata data center construct to simulate physical servers (just
    # "servers" hereafter). The reason is that a Sonata DC has CPU/RAM resource
    # constraints just like a server. We also model the links between servers
    # with the bandwidth constraints of the Sonata switch-to-DC link.
    # The topology we create below is one rack with two servers. The rack has a
    # ToR switch (a Sonata switch called "tor1") used to place chain VNFs.
    # Similar to the paper's middlebox-as-a-server story, we put the client
    # and server (traffic source and sink) outside the DC.
    # We do not use the Sonata "host" construct to model the servers because
    # Sonata uses the Mininet host construct as-is, and a Mininet "host"
    # supports only a CPU resource constraint.
    # Unless otherwise specified, we always use "server" in variable names and
    # descriptions instead of "DC", to avoid confusion in terminology.
    # Add a resource model (RM) to limit the CPU/RAM available in each server.
    # We create one resource model and use it for all servers, meaning all of
    # our servers are homogeneous. Create multiple RMs for heterogeneous
    # servers (with different amounts of CPU/RAM).
MAX_CU = 8 # max compute units
MAX_MU = 30000 # max memory units
MAX_CU_NET = 24
MAX_MU_NET = 400000
    # The CPU/RAM resources above are consumed by VNFs with one of these
    # flavors. For some reason the memory allocated for the tiny flavor is
    # 42 MB instead of 32 MB on this system. Other flavors are multiples of
    # this 42 MB (as expected).
# "tiny", {"compute": 0.5, "memory": 32, "disk": 1}
# "small", {"compute": 1.0, "memory": 128, "disk": 20}
# "medium", {"compute": 4.0, "memory": 256, "disk": 40}
# "large", {"compute": 8.0, "memory": 512, "disk": 80}
# "xlarge", {"compute": 16.0, "memory": 1024, "disk": 160}
#
    # Note that all these container VNFs need at least 500 MB of memory to
    # work. The firewall in particular, which runs OVS, needs more than 1 GB
    # to process packets. If you do not allocate sufficient CPU, the system
    # behaves badly. In most cases all physical cores get pinned (probably
    # because of contention between OVS and the cgroup memory limitation) and
    # the Sonata VM's OOM killer starts killing random processes.
# net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=False,
# dc_emulation_max_cpu=MAX_CU_NET,
# dc_emulation_max_mem=MAX_MU_NET)
net = DCNetwork(controller=RemoteController, monitor=False, enable_learning=False)
# reg = ResourceModelRegistrar(MAX_CU, MAX_MU)
# rm1 = UpbSimpleCloudDcRM(MAX_CU, MAX_MU)
# rm2 = UpbSimpleCloudDcRM(MAX_CU * 2, MAX_MU * 2)
# rm3 = UpbSimpleCloudDcRM(MAX_CU, MAX_MU)
# reg.register("homogeneous_rm", rm)
# add 3 servers
off_cloud = net.addDatacenter('off-cloud') # place client/server VNFs
chain_server1 = net.addDatacenter('chain-server1')
# chain_server2 = net.addDatacenter('chain-server2')
# off_cloud.assignResourceModel(rm1)
# chain_server1.assignResourceModel(rm2)
# chain_server2.assignResourceModel(rm3)
# connect data centers with switches
tor1 = net.addSwitch('tor1')
# link data centers and switches
net.addLink(off_cloud, tor1)
net.addLink(chain_server1, tor1)
# net.addLink(chain_server2, tor1)
# create REST API endpoint
api = RestApiEndpoint("0.0.0.0", 5001)
# connect API endpoint to containernet
api.connectDCNetwork(net)
# connect data centers to the endpoint
api.connectDatacenter(off_cloud)
api.connectDatacenter(chain_server1)
# api.connectDatacenter(chain_server2)
# start API and containernet
api.start()
net.start()
return (net, api, [off_cloud, chain_server1])
# return (net, dc, api)
def set_bw(multiplier):
low_bw = 1 * multiplier / 10
high_bw = 2 * multiplier / 10
print("Scaling up bandwidth by %d and %d" % (low_bw, high_bw))
sys.stdout.flush()
# Output DC1
os.system('ovs-vsctl -- set Port dc1.s1-eth2 qos=@newqos -- \
--id=@newqos create QoS type=linux-htb other-config:max-rate=' + str(high_bw) + ' queues=0=@q0 -- \
--id=@q0 create Queue other-config:min-rate=' + str(high_bw) + ' other-config:max-rate=' + str(high_bw))
# os.system('ovs-vsctl -- set Port dc2.s1-eth1 qos=@newqos -- \
# --id=@newqos create QoS type=linux-htb other-config:max-rate=' + str(high_bw) + ' queues=0=@q0 -- \
# --id=@q0 create Queue other-config:min-rate=' + str(high_bw) + ' other-config:max-rate=' + str(high_bw))
def nodeUpgrade():
""" Implements node-upgrade scenario. TBD. """
cmds = []
net, api, dcs = prepareDC()
off_cloud, cs1 = dcs[0], dcs[1]
fl = "ids"
# create client with one interface
client = off_cloud.startCompute("client", image='knodir/client',
flavor_name=fl,
network=[{'id': 'intf1', 'ip': '10.0.0.2/24'}])
client.sendCmd('sudo ifconfig intf1 hw ether 00:00:00:00:00:1')
    # create the NAT VNF with two interfaces. Its 'input' interface faces the
    # client and its 'output' interface faces the 'fw' VNF.
nat = cs1.startCompute("nat", image='knodir/nat',
flavor_name=fl,
network=[{'id': 'input', 'ip': '10.0.0.3/24'},
{'id': 'output', 'ip': '10.0.1.4/24'}])
nat.sendCmd('sudo ifconfig input hw ether 00:00:00:00:00:2')
nat.sendCmd('sudo ifconfig output hw ether 00:00:00:00:00:3')
    # create the fw VNF. Its 'input' interface faces the 'nat' VNF and its
    # 'output-*' interfaces face the 'ids' and 'vpn' VNFs. The interfaces are
    # bridged inside the container; the firewall image runs OVS and a Ryu controller.
fw = cs1.startCompute("fw", image='knodir/sonata-fw-iptables2',
flavor_name=fl,
network=[{'id': 'input', 'ip': '10.0.1.5/24'},
{'id': 'output-ids1', 'ip': '10.0.1.60/24'},
{'id': 'output-ids2', 'ip': '10.0.1.61/24'},
{'id': 'output-vpn', 'ip': '10.0.2.4/24'}])
fw.sendCmd('sudo ifconfig input hw ether 00:00:00:00:00:4')
fw.sendCmd('sudo ifconfig output-ids1 hw ether 00:00:00:00:00:05')
fw.sendCmd('sudo ifconfig output-ids2 hw ether 00:00:00:00:01:05')
fw.sendCmd('sudo ifconfig output-vpn hw ether 00:00:00:00:00:6')
    # create the ids VNFs with two interfaces each: 'input' faces the 'fw' VNF
    # and 'output' faces the 'vpn' VNF.
ids1 = cs1.startCompute("ids1", image='knodir/snort-trusty',
flavor_name=fl,
network=[{'id': 'input', 'ip': '10.0.1.70/24'},
{'id': 'output', 'ip': '10.0.1.80/24'}])
ids1.sendCmd('sudo ifconfig input hw ether 00:00:00:00:00:7')
ids1.sendCmd('sudo ifconfig output hw ether 00:00:00:00:00:8')
ids2 = cs1.startCompute("ids2", image='knodir/snort-xenial',
flavor_name=fl,
network=[{'id': 'input', 'ip': '10.0.1.71/24'},
{'id': 'output', 'ip': '10.0.1.81/24'}])
ids2.sendCmd('sudo ifconfig input hw ether 00:00:00:00:00:7')
ids2.sendCmd('sudo ifconfig output hw ether 00:00:00:00:00:8')
    # create the VPN VNF. Its 'input' interfaces face the 'ids' and 'fw'
    # VNFs, and its 'output' interface faces the 'server' VNF.
vpn = cs1.startCompute("vpn", image='knodir/vpn-client',
flavor_name=fl,
network=[{'id': 'input-ids1', 'ip': '10.0.1.90/24'},
{'id': 'input-ids2', 'ip': '10.0.1.91/24'},
{'id': 'input-fw', 'ip': '10.0.2.5/24'},
{'id': 'output', 'ip': '10.0.10.2/24'}])
vpn.sendCmd('sudo ifconfig input-ids1 hw ether 00:00:00:00:00:9')
vpn.sendCmd('sudo ifconfig input-fw hw ether 00:00:00:00:00:10')
vpn.sendCmd('sudo ifconfig output hw ether 00:00:00:00:00:11')
    # create the server VNF with one interface. Do not change the assigned
    # 10.0.10.10/24 address of the server. It is the address VPN clients use to
    # connect to the server, and it is hardcoded inside client.ovpn of the
    # vpn-client Docker image. We also remove the injected routing table entry
    # for this address. So, if you change this address, make sure it is also
    # changed in the client.ovpn file and in the subprocess mn.vpn route
    # injection call below.
server = off_cloud.startCompute("server", image='knodir/vpn-server',
flavor_name=fl,
network=[{'id': 'intf2', 'ip': '10.0.10.10/24'}])
server.sendCmd('sudo ifconfig intf2 hw ether 00:00:00:00:00:12')
# execute /start.sh script inside firewall Docker image. It starts Ryu
# controller and OVS with proper configuration.
cmd = 'sudo docker exec -i mn.fw /bin/bash /root/start.sh &'
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from fw start.sh start (0 is success)' % execStatus)
# os.system("sudo docker update --cpus 64 --cpuset-cpus 0-63 mn.client mn.nat mn.fw mn.ids1 mn.ids2 mn.vpn mn.server")
# os.system("sudo docker update --cpus 8 --cpuset-cpus 0-7 mn.client mn.nat mn.fw mn.ids1 mn.ids2 mn.vpn mn.server")
# os.system("sudo docker update --cpu-shares 200000 mn.fw")
    print('> sleeping 2s to let the Ryu controller initialize')
time.sleep(2)
print('< wait complete')
print('fw start done')
    # execute /start.sh script inside the ids image. It bridges the input and
    # output interfaces with br0 and starts the ids process listening on br0.
cmd = 'sudo docker exec -i mn.ids1 /bin/bash -c "sh /start.sh"'
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from ids1 start.sh start (0 is success)' % execStatus)
cmd = 'sudo docker exec -i mn.ids2 /bin/bash -c "sh /start.sh"'
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from ids2 start.sh start (0 is success)' % execStatus)
# execute /start.sh script inside nat image. It attaches both input
# and output interfaces to OVS bridge to enable packet forwarding.
cmd = 'sudo docker exec -i mn.nat /bin/bash /start.sh'
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from nat start.sh start (0 is success)' % execStatus)
# chain 'client <-> nat <-> fw <-> ids <-> vpn <-> server'
net.setChain('client', 'nat', 'intf1', 'input', bidirectional=True,
cmd='add-flow')
net.setChain('nat', 'fw', 'output', 'input', bidirectional=True,
cmd='add-flow')
net.setChain('fw', 'ids1', 'output-ids1', 'input', bidirectional=True,
cmd='add-flow')
net.setChain('fw', 'ids2', 'output-ids2', 'input', bidirectional=True,
cmd='add-flow')
net.setChain('fw', 'vpn', 'output-vpn', 'input-fw', bidirectional=True,
cmd='add-flow')
net.setChain('ids1', 'vpn', 'output', 'input-ids1', bidirectional=True,
cmd='add-flow')
net.setChain('ids2', 'vpn', 'output', 'input-ids2', bidirectional=True,
cmd='add-flow')
net.setChain('vpn', 'server', 'output', 'intf2', bidirectional=True,
cmd='add-flow')
# start openvpn server and related services inside openvpn server
cmds.append('sudo docker exec -i mn.server /bin/bash -c "ufw enable"')
# open iperf3 port (5201) on firewall (ufw)
cmds.append('sudo docker exec -i mn.server /bin/bash -c "ufw allow 5201"')
cmds.append('sudo docker exec -i mn.server /bin/bash -c "ufw status"')
cmds.append('sudo docker exec -i mn.server /bin/bash -c "service openvpn start"')
cmds.append('sudo docker exec -i mn.server /bin/bash -c "service openvpn status"')
cmds.append('sudo docker exec -i mn.server /bin/bash -c "service rsyslog start"')
cmds.append('sudo docker exec -i mn.server /bin/bash -c "service rsyslog status"')
# execute /start.sh script inside VPN client to connect to VPN server.
cmds.append('sudo docker exec -i mn.vpn /bin/bash /start.sh &')
for cmd in cmds:
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from %s (0 is success)' % (execStatus, cmd))
cmds[:] = []
    print('> sleeping 5s to let the VPN client initialize...')
time.sleep(5)
print('< wait complete')
print('VPN client VNF started')
# rewrite client and NAT VNF MAC addresses for tcpreplay
cmds.append('sudo docker exec -i mn.client /bin/bash -c "ifconfig intf1 hw ether 00:00:00:00:00:01"')
cmds.append('sudo docker exec -i mn.nat /bin/bash -c "ifconfig input hw ether 00:00:00:00:00:02"')
# manually chain routing table entries on VNFs
cmds.append('sudo docker exec -i mn.client /bin/bash -c "route add -net 10.0.0.0/16 dev intf1"')
cmds.append('sudo docker exec -i mn.client /bin/bash -c "route add -net 10.8.0.0/24 dev intf1"')
cmds.append('sudo docker exec -i mn.nat /bin/bash -c "route add -net 10.0.10.0/24 dev output"')
cmds.append('sudo docker exec -i mn.nat /bin/bash -c "ip route add 10.8.0.0/24 dev output"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.0.10.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route del -net 10.0.1.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.0.1.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.8.0.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route del -net 10.0.1.0/24 dev input"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.0.0.0/24 dev input"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.0.1.0/26 dev input"')
cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "route add -net 10.0.0.0/24 dev input-ids1"')
# cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "route add -net 10.0.0.0/24 dev input-ids2"')
cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "ip route del 10.0.10.10/32"')
cmds.append('sudo docker exec -i mn.server /bin/bash -c "route add -net 10.0.0.0/24 dev intf2"')
cmds.append('sudo docker exec -i mn.client /bin/bash -c " ping -i 0.1 -c 10 10.0.10.10"')
cmds.append('sudo docker exec -i mn.client /bin/bash -c " ping -i 0.1 -c 10 10.8.0.1"')
for cmd in cmds:
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from %s (0 is success)' % (execStatus, cmd))
cmds[:] = []
print('ping client -> server after explicit chaining. Packet drop %s%%' %
net.ping([client, server], timeout=5))
os.system('sudo docker cp ../traces/output.pcap mn.client:/')
os.system('sudo docker cp ../traces/ftp.ready.pcap mn.client:/')
return net
def clean_stale(cmds):
# kill existing iperf server
# cmds.append('sudo docker exec -i mn.server /bin/bash -c "pkill iperf3"')
# remove stale iperf output file (if any)
# cmds.append('sudo docker exec -i mn.client /bin/bash -c "rm /tmp/iperf3.json"')
# kill existing dstat
cmds.append('sudo docker exec -i mn.client /bin/bash -c "pkill tcpreplay"')
cmds.append('sudo docker exec -i mn.client /bin/bash -c "pkill python2"')
cmds.append('sudo docker exec -i mn.ids1 /bin/bash -c "pkill python2"')
cmds.append('sudo docker exec -i mn.ids2 /bin/bash -c "pkill python2"')
cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "pkill python2"')
# remove stale dstat output file (if any)
cmds.append('sudo docker exec -i mn.client /bin/bash -c "rm /tmp/dstat.csv"')
cmds.append('sudo docker exec -i mn.ids1 /bin/bash -c "rm /tmp/dstat.csv"')
cmds.append('sudo docker exec -i mn.ids2 /bin/bash -c "rm /tmp/dstat.csv"')
cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "rm /tmp/dstat.csv"')
for cmd in cmds:
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from %s (0 is success)' % (execStatus, cmd))
cmds[:] = []
    print('wait 3s for the iperf server and other stale processes to clean up')
time.sleep(3)
return cmds
def clean_and_save(cmds, multiplier):
cmds.append('sudo docker exec -i mn.client /bin/bash -c "pkill tcpreplay"')
    print('wait 3s for the iperf client and other processes to terminate')
time.sleep(3)
    # kill dstat daemons; they run as python2 processes.
cmds.append('sudo docker exec -i mn.client /bin/bash -c "pkill python2"')
cmds.append('sudo docker exec -i mn.ids1 /bin/bash -c "pkill python2"')
cmds.append('sudo docker exec -i mn.ids2 /bin/bash -c "pkill python2"')
cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "pkill python2"')
# copy the iperf client output file to the local machine
# cmds.append('sudo docker cp mn.client:/tmp/iperf3.json ./output/from-client.json')
cmds.append('rm -rf ./results/upgrade/' + str(multiplier / 10**6) + '*.csv')
cmds.append('sudo docker cp mn.client:/tmp/dstat.csv ./results/upgrade/' +
str(multiplier / 10**6) + '-from-client.csv')
cmds.append('sudo docker cp mn.ids1:/tmp/dstat.csv ./results/upgrade/' +
str(multiplier / 10**6) + '-from-ids1.csv')
cmds.append('sudo docker cp mn.ids2:/tmp/dstat.csv ./results/upgrade/' +
str(multiplier / 10**6) + '-from-ids2.csv')
cmds.append('sudo docker cp mn.vpn:/tmp/dstat.csv ./results/upgrade/' +
str(multiplier / 10**6) + '-from-vpn.csv')
# do remaining cleanup inside containers
# cmds.append('sudo docker exec -i mn.server /bin/bash -c "pkill iperf3"')
for cmd in cmds:
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from %s (0 is success)' % (execStatus, cmd))
cmds[:] = []
return cmds
def switch_ids():
""" Switch IDS1 with IDS2. """
cmds = []
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ovs-ofctl del-flows ovs-1 in_port=1,out_port=2"')
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ovs-ofctl del-flows ovs-1 in_port=2,out_port=1"')
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ovs-ofctl add-flow ovs-1 priority=2,in_port=1,action=output:3"')
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ovs-ofctl add-flow ovs-1 priority=2,in_port=3,action=output:1"')
# # # little hack to enforce immediate impact of the new OVS rule
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ip link set output-ids1 down && ip link set output-ids1 up"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.0.10.0/24 dev output-ids2"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.8.0.0/24 dev output-ids2"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.0.1.0/24 dev output-ids2"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route del -net 10.0.10.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route del -net 10.8.0.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route del -net 10.0.1.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "route del -net 10.0.1.0/24 dev input-ids1"')
cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "route add -net 10.0.1.0/24 dev input-ids2"')
for cmd in cmds:
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from %s (0 is success)' % (execStatus, cmd))
cmds[:] = []
#print('> sleeping 60s to VPN client initialize...')
# time.sleep(60)
#print('< wait complete')
return net
def switch_ids_back():
""" Undoes everything switch_ids() did, i.e., switches IDS2 with IDS1. """
cmds = []
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ovs-ofctl del-flows ovs-1 in_port=1,out_port=3"')
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ovs-ofctl del-flows ovs-1 in_port=3,out_port=1"')
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ovs-ofctl add-flow ovs-1 priority=2,in_port=1,action=output:2"')
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ovs-ofctl add-flow ovs-1 priority=2,in_port=2,action=output:1"')
# # little hack to enforce immediate impact of the new OVS rule
# cmds.append('sudo docker exec -i mn.fw /bin/bash -c "ip link set output-ids2 down && ip link set output-ids2 up"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.0.10.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.8.0.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route add -net 10.0.1.0/24 dev output-ids1"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route del -net 10.0.10.0/24 dev output-ids2"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route del -net 10.8.0.0/24 dev output-ids2"')
cmds.append('sudo docker exec -i mn.fw /bin/bash -c "route del -net 10.0.1.0/24 dev output-ids2"')
cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "route del -net 10.0.1.0/24 dev input-ids2"')
cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "route add -net 10.0.1.0/24 dev input-ids1"')
for cmd in cmds:
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from %s (0 is success)' % (execStatus, cmd))
cmds[:] = []
def benchmark(multiplier):
""" Start traffic generation. """
# list of commands to execute one-by-one
test_time = 300
cmds = []
# clean stale programs and remove old files
print("Benchmarking %d Mbps...", multiplier / 10**6)
cmds.append('sudo rm ./results/upgrade/' +
str(multiplier / 10**6) + '-from-client.csv')
cmds.append('sudo rm ./results/upgrade/' +
str(multiplier / 10**6) + '-from-ids1.csv')
cmds.append('sudo rm ./results/upgrade/' +
str(multiplier / 10**6) + '-from-ids2.csv')
cmds.append('sudo rm ./results/upgrade/' +
str(multiplier / 10**6) + '-from-vpn-fw.csv')
cmds.append('sudo rm ./results/upgrade/' +
str(multiplier / 10**6) + '-from-vpn-ids1.csv')
cmds.append('sudo rm ./results/upgrade/' +
str(multiplier / 10**6) + '-from-vpn-ids2.csv')
cmds.append('sudo rm ./results/upgrade/' +
str(multiplier / 10**6) + '-from-server.csv')
cmds = clean_stale(cmds)
# set_bw(multiplier)
# Set the initial bandwidth constraints of the system
# set_bw(multiplier)
time.sleep(3)
# cmds.append('sudo docker exec -i mn.server /bin/bash -c "iperf3 -s --bind 10.8.0.1" &')
# cmds.append('sudo docker exec -i mn.client /bin/bash -c "dstat --net --time -N intf1 --bits --output /tmp/dstat.csv" &')
# cmds.append('sudo docker exec -i mn.ids1 /bin/bash -c "dstat --net --time -N input --bits --output /tmp/dstat.csv" &')
# cmds.append('sudo docker exec -i mn.vpn /bin/bash -c "dstat --net --time -N input-fw --bits --output /tmp/dstat.csv" &')
cmd = 'sudo timeout %d dstat --net --time -N dc1.s1-eth2 --nocolor --output ./results/upgrade/%d-from-client.csv &' % (
test_time, (multiplier / 10**6))
cmds.append(cmd)
cmd = 'sudo timeout %d dstat --net --time -N dc2.s1-eth8 --nocolor --output ./results/upgrade/%d-from-ids1.csv &' % (
test_time, (multiplier / 10**6))
cmds.append(cmd)
cmd = 'sudo timeout %d dstat --net --time -N dc2.s1-eth10 --nocolor --output ./results/upgrade/%d-from-ids2.csv &' % (
test_time, (multiplier / 10**6))
cmds.append(cmd)
cmd = 'sudo timeout %d dstat --net --time -N dc2.s1-eth12 --nocolor --output ./results/upgrade/%d-from-vpn-ids1.csv &' % (
test_time, (multiplier / 10**6))
cmds.append(cmd)
cmd = 'sudo timeout %d dstat --net --time -N dc2.s1-eth13 --nocolor --output ./results/upgrade/%d-from-vpn-ids2.csv &' % (
test_time, (multiplier / 10**6))
cmds.append(cmd)
cmd = 'sudo timeout %d dstat --net --time -N dc2.s1-eth14 --nocolor --output ./results/upgrade/%d-from-vpn-fw.csv &' % (
test_time, (multiplier / 10**6))
cmds.append(cmd)
cmd = 'sudo timeout %d docker exec -i mn.client /bin/bash -c "tcpreplay --quiet --enable-file-cache \
--loop=0 --mbps=%d -d 1 --intf1=intf1 /ftp.ready.pcap" &' % (test_time, (multiplier / 10**7))
cmds.append(cmd)
cmd = 'sudo timeout %d docker exec -i mn.client /bin/bash -c "tcpreplay --quiet --enable-file-cache \
--loop=0 --mbps=%d -d 1 --intf1=intf1 /output.pcap" &' % (test_time, (multiplier / 10**7))
cmds.append(cmd)
for cmd in cmds:
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from %s (0 is success)' % (execStatus, cmd))
cmds[:] = []
    # trigger the IDS switch halfway through the test
    print('switch_ids() scheduled, waiting %ds before trigger' % (test_time / 2))
    time.sleep(test_time / 2)
    print('switch_ids() wait complete. Triggering the IDS switch.')
switch_ids()
    print('switch_ids() complete')
print("Wait %d seconds for the test to complete" % (test_time / 2 + 10))
time.sleep(test_time / 2 + 10)
# clean and save the results in csv file named after the test
# cmds = clean_and_save(cmds, multiplier)
cmds.append('sudo killall dstat')
cmds.append('sudo killall tcpreplay')
for cmd in cmds:
execStatus = subprocess.call(cmd, shell=True)
print('returned %d from %s (0 is success)' % (execStatus, cmd))
switch_ids_back()
print('done')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
net = nodeUpgrade()
print("Done with upgrade!")
print('Running 10 Mbps')
benchmark(10**7)
print('Running 100 Mbps')
benchmark(10**8)
print('Running 1000 Mbps')
benchmark(10**9)
print('Running 10000 Mbps')
benchmark(10**10)
# net.CLI()
net.stop()
cleanup()
os.system("sudo ../clean-stale.sh")
|
|
"""Provide data suitable for Fava's charts. """
from datetime import date
from datetime import timedelta
from typing import Any
from typing import Dict
from typing import Generator
from typing import List
from typing import Optional
from typing import Pattern
from typing import Tuple
from typing import Union
from beancount.core import realization
from beancount.core.amount import Amount
from beancount.core.data import iter_entry_dates
from beancount.core.data import Transaction
from beancount.core.inventory import Inventory
from beancount.core.number import Decimal
from beancount.core.position import Position
from simplejson import JSONEncoder
from fava.core._compat import FLAG_UNREALIZED
from fava.core.conversion import cost_or_value
from fava.core.conversion import units
from fava.core.module_base import FavaModule
from fava.core.tree import SerialisedTreeNode
from fava.core.tree import Tree
from fava.helpers import FavaAPIException
from fava.util import listify
from fava.util import pairwise
from fava.util.date import Interval
from fava.util.typing import TypedDict
ONE_DAY = timedelta(days=1)
def inv_to_dict(inventory: Inventory) -> Dict[str, Decimal]:
"""Convert an inventory to a simple cost->number dict."""
return {
pos.units.currency: pos.units.number
for pos in inventory
if pos.units.number is not None
}
Inventory.for_json = inv_to_dict # type: ignore
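# A small usage sketch of inv_to_dict with made-up positions; only the units
# (currency and number) of each position end up in the resulting dict.
def _inv_to_dict_example() -> Dict[str, Decimal]:
    inv = Inventory()
    inv.add_amount(Amount(Decimal("10"), "USD"))
    inv.add_amount(Amount(Decimal("2"), "GOOG"))
    return inv_to_dict(inv)  # {"GOOG": Decimal("2"), "USD": Decimal("10")}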
class FavaJSONEncoder(JSONEncoder):
"""Allow encoding some Beancount date structures."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
# Allow use of a `for_json` method to serialise dict subclasses.
kwargs["for_json"] = True
# Sort dict keys (Flask also does this by default).
kwargs["sort_keys"] = True
super().__init__(*args, **kwargs)
def default(self, o: Any) -> Any: # pylint: disable=method-hidden
if isinstance(o, Decimal):
return float(o)
if isinstance(o, (date, Amount, Position)):
return str(o)
if isinstance(o, (set, frozenset)):
return list(o)
if isinstance(o, Pattern):
return o.pattern
try:
return JSONEncoder.default(self, o)
except TypeError:
return str(o)
ENCODER = FavaJSONEncoder()
def dumps(arg: Any) -> Any:
"""Encode to JSON."""
return ENCODER.encode(arg)
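# A small usage sketch of dumps() with made-up values: Decimals are rendered
# as JSON numbers, dates as ISO strings, and keys are sorted by the encoder.
def _dumps_example() -> str:
    # Roughly: '{"balance": 1.5, "day": "2021-03-08"}'
    return dumps({"day": date(2021, 3, 8), "balance": Decimal("1.5")})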
class DateAndBalance(TypedDict):
"""Balance at a date."""
date: date
balance: Union[Dict[str, Decimal], Inventory]
class DateAndBalanceWithBudget(TypedDict):
"""Balance at a date with a budget."""
date: date
balance: Inventory
budgets: Dict[str, Decimal]
class ChartModule(FavaModule):
"""Return data for the various charts in Fava."""
def hierarchy(
self,
account_name: str,
conversion: str,
begin: Optional[date] = None,
end: Optional[date] = None,
) -> SerialisedTreeNode:
"""An account tree."""
if begin is not None and end is not None:
tree = Tree(iter_entry_dates(self.ledger.entries, begin, end))
else:
tree = self.ledger.root_tree
return tree.get(account_name).serialise(
conversion, self.ledger.price_map, end - ONE_DAY if end else None
)
@listify
def prices(
self,
) -> Generator[Tuple[str, str, List[Tuple[date, Decimal]]], None, None]:
"""The prices for all commodity pairs.
Returns:
A list of tuples (base, quote, prices) where prices
is a list of prices.
"""
for base, quote in self.ledger.commodity_pairs():
prices = self.ledger.prices(base, quote)
if prices:
yield base, quote, prices
@listify
def interval_totals(
self,
interval: Interval,
accounts: Union[str, Tuple[str]],
conversion: str,
invert: bool = False,
) -> Generator[DateAndBalanceWithBudget, None, None]:
"""Renders totals for account (or accounts) in the intervals.
Args:
interval: An interval.
accounts: A single account (str) or a tuple of accounts.
            conversion: The conversion to use.
            invert: Whether to invert the balances and budgets.
        """
price_map = self.ledger.price_map
for begin, end in pairwise(self.ledger.interval_ends(interval)):
inventory = Inventory()
entries = iter_entry_dates(self.ledger.entries, begin, end)
for entry in (e for e in entries if isinstance(e, Transaction)):
for posting in entry.postings:
if posting.account.startswith(accounts):
inventory.add_position(posting)
balance = cost_or_value(
inventory, conversion, price_map, end - ONE_DAY
)
budgets = {}
if isinstance(accounts, str):
budgets = self.ledger.budgets.calculate_children(
accounts, begin, end
)
if invert:
# pylint: disable=invalid-unary-operand-type
balance = -balance
budgets = {k: -v for k, v in budgets.items()}
yield {
"date": begin,
"balance": balance,
"budgets": budgets,
}
@listify
def linechart(
self, account_name: str, conversion: str
) -> Generator[DateAndBalance, None, None]:
"""The balance of an account.
Args:
account_name: A string.
conversion: The conversion to use.
Returns:
A list of dicts for all dates on which the balance of the given
account has changed containing the balance (in units) of the
account at that date.
"""
real_account = realization.get_or_create(
self.ledger.root_account, account_name
)
postings = realization.get_postings(real_account)
journal = realization.iterate_with_balance(postings)
# When the balance for a commodity just went to zero, it will be
# missing from the 'balance' so keep track of currencies that last had
# a balance.
last_currencies = None
price_map = self.ledger.price_map
for entry, _, change, balance_inventory in journal:
if change.is_empty():
continue
balance = inv_to_dict(
cost_or_value(
balance_inventory, conversion, price_map, entry.date
)
)
currencies = set(balance.keys())
if last_currencies:
for currency in last_currencies - currencies:
balance[currency] = 0
last_currencies = currencies
yield {"date": entry.date, "balance": balance}
@listify
def net_worth(
self, interval: Interval, conversion: str
) -> Generator[DateAndBalance, None, None]:
"""Compute net worth.
Args:
interval: A string for the interval.
conversion: The conversion to use.
Returns:
A list of dicts for all ends of the given interval containing the
net worth (Assets + Liabilities) separately converted to all
operating currencies.
"""
transactions = (
entry
for entry in self.ledger.entries
if (
isinstance(entry, Transaction)
and entry.flag != FLAG_UNREALIZED
)
)
types = (
self.ledger.options["name_assets"],
self.ledger.options["name_liabilities"],
)
txn = next(transactions, None)
inventory = Inventory()
price_map = self.ledger.price_map
for end_date in self.ledger.interval_ends(interval):
while txn and txn.date < end_date:
for posting in txn.postings:
if posting.account.startswith(types):
inventory.add_position(posting)
txn = next(transactions, None)
yield {
"date": end_date,
"balance": cost_or_value(
inventory, conversion, price_map, end_date - ONE_DAY
),
}
@staticmethod
def can_plot_query(types: List[Tuple[str, Any]]) -> bool:
"""Whether we can plot the given query.
Args:
types: The list of types returned by the BQL query.
"""
return (
len(types) == 2
and types[0][1] in {str, date}
and types[1][1] is Inventory
)
def query(
self, types: List[Tuple[str, Any]], rows: List[Tuple[Any, ...]]
) -> Any:
"""Chart for a query.
Args:
types: The list of result row types.
rows: The result rows.
"""
if not self.can_plot_query(types):
raise FavaAPIException("Can not plot the given chart.")
if types[0][1] is date:
return [
{"date": date, "balance": units(inv)} for date, inv in rows
]
return [{"group": group, "balance": units(inv)} for group, inv in rows]
|
|
"""This is a helper which provides a set of definitions and Actors
that can be used to run external commands and gather responses from
them.
Create a RunCommand Actor and send it a Command object defining the
command to be run; the RunCommand will execute the command, monitoring
its progress, and sends a CommandResult object when it completes.
If the current Thespian system base supports "Watch" functionality,
the RunCommand Actor will remain responsive while the command is
performed and can interact with an input_src Actor to provide ongoing
input. If no "Watch" functionality is available, the RunCommand will
block waiting for the command to complete and can only provide static
input to the command.
"""
from datetime import datetime, timedelta
import errno
import os
import logging
import subprocess
import time
from thespian.actors import *
HALF_NUM_LINES_LOGGED=5
class Command(object):
"""Defines the Command to be run by the RunCommand Actor.
The 'exe' argument specifies the name of the executable as either
an absolute path or a relative name to be found on the current
PATH. The 'args' is a list of arguments passed to the
executable on the command line.
The 'use_shell' defaults to False, but can be set to True to
indicate that the command should be run via the normal shell.
(This is normally not recommended.)
The 'omit_string' argument can specify a string that should be
suppressed from any logging output generated for this command
(useful for suppressing sensitive information like passwords,
credit card numbers, etc. that should not appear in logs).
The 'error_ok' argument controls the logging mode if the
command fails: normally errors will be logged with the ERROR
severity, but if this argument is true, the INFO severity is
used instead.
The 'logger' specifies what logging should be done for the
command, especially in error conditions. The default is None,
which uses the standard logging endpoint when starting the
command and with the results of the command. A value of False
specifies that no logging is to be performed, otherwise it
should be an ActorAddress which will receive CommandLog
objects.
The 'logtag' argument can be used to specify the prefix value
for identifying all logged output from this command run; it
defaults to the string form of the 'exe' argument, but this may
    be long, and also generic if multiple instances of the same
command are run.
The 'input_src' argument specifies any input that should be
provided to the command. The default is None, which indicates
that no input will be provided to the command that is run. If
a string is specified, it is supplied as the stdin to the
executable.
The 'output_updates' argument is either None or specifies an
ActorAddress to which output will be sent as it is generated.
The 'omit_string' and 'max_bufsize' arguments do not affect
this output, and output is not necessarily sent in complete
lines. Normal output is sent in CommandOutput messages and
error output is sent in CommandError messages. Note that for
system bases which do not support the Thespian Watch
functionality, the output will only be sent once the command
completes.
The 'max_bufsize' specifies the maximum amount of normal output
or error output that will be collected. If command output is
in excess of this amount then the middle portion is dropped
    (the CommandResult output or error will be a tuple of the
beginning and end parts of the output). The default is 1MB of
output (the limit is applied to normal output and error output
separately, so the total amount of memory that can be consumed
is double the max_bufsize amount).
    The 'env' argument specifies the environment variables that
should be set for the new process; if not specified, the
current environment is inherited.
The 'timeout' argument should specify a maximum time period for
the command to run to completion. The default is None which
does not set a time limit. If specified, the value should be a
datetime.timedelta, or an integer number of seconds; once the
command run has exceeded that time limit, the command is halted
with a SIGTERM, followed 2 seconds later with a SIGKILL.
If 'report_on_start' is set to True, then the requestor will
receive a CommandStarted message when the command process has
been started.
"""
def __init__(self, exe, args,
use_shell=False,
omit_string=None,
error_ok=False,
logger=None,
logtag=None,
input_src=None,
output_updates=None,
max_bufsize=1024*1024,
env=None,
timeout=None,
report_on_start=False):
self.exe = exe
self.args = args
self.use_shell = use_shell
self.omit_string = omit_string or ''
self.error_ok = error_ok
self.logger = logger
self.logtag = logtag or str(exe)
self.input_src = input_src
self.output_updates = output_updates
self.max_bufsize = max_bufsize
self.env = env
self.timeout = (timeout if isinstance(timeout, timedelta)
else timedelta(seconds=timeout)) if timeout else timeout
self.report_on_start = report_on_start
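# Illustrative usage sketch (an assumption, not part of the original module):
# one way an application might drive RunCommand through a Thespian ActorSystem.
# It assumes createActor() passes the system capabilities to RunCommand's
# __init__; the system base, command, and timeouts are made-up example values.
# This helper is never called by the module itself.
def _example_run_command():
    asys = ActorSystem('multiprocQueueBase')
    try:
        runner = asys.createActor(RunCommand)
        # ask() returns the CommandResult sent back by RunCommand, or None if
        # the 30-second ask timeout expires first.
        result = asys.ask(runner,
                          Command('/bin/echo', ['hello'],
                                  logtag='echo-demo', timeout=10),
                          30)
        return result.stdout if result else 'command failed or timed out'
    finally:
        asys.shutdown()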
class CommandStarted(object):
"""Message sent by the RunCommand actor to the sender of a Command to
indicate that the command has been initiated when the
Command.report_on_start is True.
"""
def __init__(self, command, pid):
self.command = command
self.pid = pid
class CommandAbort(object):
"""Message sent to the RunCommand Actor to request a halt of the
currently running command. Note that this will only be
processed asynchronously in a Thespian System Base that
supports the ThespianWatch functionality. If there is no
command currently running, this message does nothing. There is
no response to this message (although it is expected that a
CommandError will subsequently be generated when the running
Command is aborted).
"""
class CommandLog(object):
"Message sent by RunCommand to the specified logger address."
def __init__(self, level, msg, *args):
self.level = level # a string: "info" or "error"
self.message = msg % args
class CommandOutput(object):
"""Message specifying (possibly partial) output received from the
command, and sent to the output_updates Actor. This may be
sent multiple times as output is generated by the running
process. The output is either a string (Python2) or a
bytestring (Python3) as would normally be returned by a read()
from a pipe.
"""
def __init__(self, command, output):
self.command = command # the Command being run
self.output = output
class CommandError(object):
"""Message specifying (possibly partial) error output received from
the command. This is normally sent to the output_updates Actor.
This may be sent multiple times as output is generated by the
running process. The output is either a string (Python2) or a
bytestring (Python3) as would normally be returned by a read()
from a pipe.
"""
def __init__(self, command, error_output):
self.command = command # the Command being run
self.error_output = error_output
class CommandResult(object):
"""Describes the result of executing a Command. Is "truthy" if the
Command completed successfully. Provides the normal and error
output generated by the Command execution (unfiltered). Sent
by the RunCommand Actor back to the sender of the Command to
indicate the completion state.
.command = original Command message
.exitcode = return code from executed command
.stdout = normal output string from executed command
.stderr = error output string from executed command
.errorstr = stderr, or "FAILED" if no stderr and command failed
.duration = timedelta indicating duration of command run
The exitcode will be -2 if the command timed out. Commands
are run sequentially, and Command timing does not start until
the command is run; Commands sent to the RunCommand actor
while a previous command is running will be queued until the
current command completes.
The .stdout and .stderr values may be a tuple of two strings
instead of a string in the case that the size of the
corresponding output was in excess of the max_bufsize
specified in the Command; in this case the first element of
the tuple is the beginning of the output, the second element
of the tuple is the end of the output, and the middle portion
is missing.
"""
def __init__(self, command, exitcode, stdout='', stderr='', duration=None):
self.command = command # original Command message
self.exitcode = exitcode
self.stdout = stdout
self.stderr = stderr
self.duration = duration
def __nonzero__(self):
return 1 if 0 == self.exitcode else 0 # Python2: truthy if command success (exitcode==0)
def __bool__(self):
return 0 == self.exitcode # Python3: truthy if command success (exitcode==0)
@property
def errorstr(self):
return self.stderr or ('' if self else 'FAILED')
def __str__(self):
rval = [self.__class__.__name__, 'success' if self else 'FAILED',]
if not self:
rval.extend(['Error #%d' % self.exitcode,
' [...] '.join(self.stderr)
if isinstance(self.stderr, tuple) else
self.stderr])
return ' '.join(rval)
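# Illustrative sketch (not part of the original module): the truthiness and
# errorstr conventions of CommandResult described above, with made-up values.
def _command_result_example():
    cmd = Command('/bin/echo', ['hello'], logtag='echo-demo')
    ok = CommandResult(cmd, 0, stdout='hello\n')
    failed = CommandResult(cmd, 2, stderr='boom')
    assert ok and ok.errorstr == ''      # exit code 0: truthy, no error text
    assert not failed and failed.errorstr == 'boom'
    return str(failed)                   # "CommandResult FAILED Error #2 boom"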
def str_form(bytestr):
try:
return bytestr.decode('utf-8')
except UnicodeDecodeError:
try:
import chardet
try:
return bytestr.decode(chardet.detect(bytestr)['encoding'])
except UnicodeDecodeError:
pass
except ImportError:
pass
except AttributeError:
return bytestr # already a string
    return str(bytestr)
class RunCommand(ActorTypeDispatcher):
def __init__(self, capabilities, *args, **kw):
super(RunCommand, self).__init__(*args, **kw)
self.pending_commands = []
self.command_num = 0
self.capabilities = capabilities
def receiveMsg_Command(self, commandmsg, sender):
commandmsg.sender = sender
self.pending_commands.append(commandmsg)
if len(self.pending_commands) == 1:
return self._start_command()
return self._return_watched()
def receiveMsg_CommandAbort(self, abortmsg, sender):
if not getattr(self, 'p', None):
return None
command = self.pending_commands[-1]
command.timeout = timedelta(milliseconds=1)
return self._timed_watch_for_completion(command)
def _return_watched(self):
subp = getattr(self, 'p', None)
if not subp:
return None
try:
return ThespianWatch([subp.stdout.fileno(), subp.stderr.fileno()])
except IOError:
            return self._finished_command()  # command must have finished just now
except ValueError:
            return self._finished_command()  # command must have finished just now
def _set_command_timeout(self, command):
if command.timeout:
command.expiration = datetime.now() + command.timeout
self.wakeupAfter(command.timeout, payload=self.command_num)
def _log(self, command, level, msg, *args):
if command.logger:
if isinstance(command.logger, ActorAddress):
self.send(command.logger, CommandLog(level, msg, *args))
elif command.logger is None:
getattr(logging, level)(msg, *args)
def _start_command(self):
can_watch = self.capabilities.get('Thespian Watch Supported', False)
command = self.pending_commands[-1]
self.command_num += 1
logcmd = command.exe + ' ' + ' '.join(command.args)
if command.omit_string:
logcmd = logcmd.replace(command.omit_string, "...")
self._log(command, "info", command.logtag + " CMD: " + logcmd)
self.input_open = command.input_src
self.start_time = datetime.now()
self.output = { 'normal': '', 'normal_fh': '', 'error': '', 'error_fh': '' }
try:
self.p = subprocess.Popen([command.exe] + command.args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
bufsize=0 if can_watch else (command.max_bufsize or 0),
env=command.env,
shell=command.use_shell)
except OSError as ex:
# Error running the executable
self._add_output(command, 'error', str(ex) + '\n')
return self._finished_command(ex.errno)
if command.report_on_start:
self.send(command.sender, CommandStarted(command, self.p.pid))
if command.input_src:
try:
try:
self.p.stdin.write(command.input_src)
except TypeError:
self.p.stdin.write(command.input_src.encode('utf-8'))
except BrokenPipeError:
pass
except OSError as ex:
if ex.errno == errno.EINVAL and self.p.poll() is not None:
pass # Windows: fails w/EINVAL if proc already exited
else:
raise
try:
self.p.stdin.flush()
except BrokenPipeError:
pass
except OSError as ex:
if ex.errno == errno.EINVAL and self.p.poll() is not None:
pass # Windows: fails w/EINVAL if proc already exited
else:
raise
if can_watch:
self.p.stdin.close() # <-- magic.1: do this or the output gets hung
self.input_open = False
else:
if can_watch:
self.p.stdin.close()
if can_watch:
# magic.2: must nonblock these to allow reading from
# subprocess before it is shutdown.
import fcntl
fcntl.fcntl(self.p.stdout.fileno(), fcntl.F_SETFL,
fcntl.fcntl(self.p.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(self.p.stderr.fileno(), fcntl.F_SETFL,
fcntl.fcntl(self.p.stderr.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
return self._timed_watch_for_completion(command)
def _timed_watch_for_completion(self, command):
can_watch = self.capabilities.get('Thespian Watch Supported', False)
if can_watch:
self._set_command_timeout(command)
return self._return_watched()
# This Thespian base does not support ThespianWatch, so this
# will have to use a blocking wait on the command completion
if command.timeout:
now = datetime.now()
end_time = now + command.timeout
while now < end_time:
time.sleep(0.5)
if self.p.poll() is not None:
break
now = datetime.now()
if now >= end_time:
try:
self.p.terminate()
except OSError as ex:
if ex.errno == 3:
pass # process already gone
else:
raise
end_time += timedelta(seconds=2)
now = datetime.now()
while now < end_time:
if self.p.poll() is not None:
break
time.sleep(0.5)
now = datetime.now()
if self.p.poll() is None:
try:
self.p.kill()
except OSError as ex:
if ex.errno == 3:
pass # process already gone
else:
raise
time.sleep(0.5)
out, err = self.p.communicate(None)
self._add_output(self.pending_commands[-1], 'normal', out)
self._add_output(self.pending_commands[-1], 'error', err)
self._finished_command()
def receiveMsg_WatchMessage(self, watchmsg, sender):
subp = getattr(self, 'p', None)
if subp: # and self.pending_commands?
# n.b. output read from the pipes is a byte string, in an
# unknown encoding, although with older python, they could
# also be strings
if not self.pending_commands:
return
for each in watchmsg.ready:
if each == subp.stdout.fileno():
self._add_output(self.pending_commands[-1],
'normal', subp.stdout.read())
elif each == subp.stderr.fileno():
self._add_output(self.pending_commands[-1],
'error', subp.stderr.read())
if subp.poll() is not None:
return self._finished_command()
return self._return_watched()
def _add_output(self, command, outmark, new_output):
if not new_output:
return
self.output[outmark] += str_form(new_output)
if command.max_bufsize and \
len(self.output[outmark]) + \
len(self.output[outmark+'_fh']) > command.max_bufsize:
if not self.output[outmark+'_fh']:
self.output[outmark+'_fh'] = self.output[outmark][:int(command.max_bufsize/2)]
self.output[outmark] = self.output[outmark][len(self.output[outmark+'_fh']):]
self.output[outmark] = self.output[outmark][-(command.max_bufsize-len(self.output[outmark+'_fh'])):]
updates_to = self.pending_commands[-1].output_updates
if isinstance(updates_to, ActorAddress):
self.send(updates_to, (CommandOutput
if outmark == 'normal' else
CommandError)(command, new_output))
if outmark == 'normal':
self._log_normal_output(new_output)
elif outmark == 'error':
self._log_error_output(new_output)
def _log_normal_output(self, new_output):
# Logs the first HALF_NUM_LINES_LOGGED output lines, followed
# by an elision mark, followed by the last
# HALF_NUM_LINES_LOGGED lines of output (overlapping
# properly), skipping blank lines, tagging appropriately.
self._noli = self._log__output(
'normal', getattr(self, '_noli', None), ' OUT| ',
self.pending_commands[-1], "info")
def _log_error_output(self, new_output):
self._eoli = self._log__output(
'error', getattr(self, '_eoli', None), ' ERR> ',
self.pending_commands[-1],
"info" if self.pending_commands[-1].error_ok else "error")
def _log__output(self, outmark, oli, pfx, command, level):
if oli and oli['cmdnum'] == self.command_num and \
oli['nlines'] >= HALF_NUM_LINES_LOGGED:
return
if not oli or oli['cmdnum'] != self.command_num:
oli = {'cmdnum': self.command_num, 'nbytes': 0, 'nlines': 0,}
# Assumes that the first HALF_NUM_LINES_LOGGED lines is <
# command.max_bufsize / 2
for li in range(oli['nbytes'], len(self.output[outmark])):
if '\n' == self.output[outmark][li]:
lline = self.output[outmark][oli['nbytes']:li].strip()
if lline:
self._log(command, level,
command.logtag + pfx +
(lline.replace(command.omit_string, '...')
if command.omit_string else lline))
oli['nbytes'] += li - oli['nbytes'] + 1
oli['nlines'] += 1
if oli['nlines'] == HALF_NUM_LINES_LOGGED:
break
return oli
def _drain_output(self, command, outmark, fd):
while True:
try:
out = fd.read()
except ValueError: # read on a closed file
return
if not out:
return
self._add_output(command, outmark, out)
def _finished_command(self, errorcode=None):
command = self.pending_commands[-1]
subp = getattr(self, 'p', None)
if subp:
self._drain_output(command, 'normal', subp.stdout)
self._drain_output(command, 'error', subp.stderr)
result = CommandResult(command,
errorcode or
(-4 if not subp or subp.returncode is None else subp.returncode),
((self.output['normal_fh'],
self.output['normal'])
if self.output['normal_fh'] else
self.output['normal']),
((self.output['error_fh'],
self.output['error'])
if self.output['error_fh'] else
self.output['error']),
datetime.now() - self.start_time)
self.pending_commands.pop()
self.output = None
self.input_open = False
self.p = None
self._log_finished_command(result)
self.send(command.sender, result)
if self.pending_commands:
return self._start_command()
return self._return_watched()
def _log_finished_command(self, result):
normal_out = result.stdout
if isinstance(normal_out, tuple):
normal_out = normal_out[1]
else:
normal_out = normal_out[getattr(self, '_noli', {}).get('nbytes', 0):]
nelided = False
for ni in range(0, len(normal_out)):
if '\n' == normal_out[-ni-1]:
lno = list(filter(None, normal_out[-ni:].split('\n')))
if len(lno) == HALF_NUM_LINES_LOGGED:
nelided = ni != len(normal_out)
break
else:
lno = list(filter(None, normal_out.split('\n')))
error_out = result.stderr
if isinstance(error_out, tuple):
error_out = error_out[1]
else:
error_out = error_out[(getattr(self, '_eoli', {}) or {}).get('nbytes', 0):]
eelided = False
for ei in range(0, len(error_out)):
if '\n' == error_out[-ei-1]:
leo = list(filter(None, error_out[-ei:].split('\n')))
if len(leo) == HALF_NUM_LINES_LOGGED:
nelided = ei != len(error_out)
break
else:
leo = list(filter(None, error_out.split('\n')))
lognormal = lambda msg, *args: self._log(result.command, 'info',
msg, *args)
logerror = lambda msg, *args: self._log(result.command,
'info'
if result.command.error_ok
else 'error',
msg, *args)
for each in lno:
lognormal(result.command.logtag + ' OUT| ' +
(each.replace(result.command.omit_string, '...')
if result.command.omit_string else each))
for each in leo:
logerror(result.command.logtag + ' ERR> ' +
(each.replace(result.command.omit_string, '...')
if result.command.omit_string else each))
if result.exitcode:
logerror(result.command.logtag + ' ERROR exit code: %d' % result.exitcode)
else:
lognormal(result.command.logtag + ' completed successfully')
def receiveMsg_WakeupMessage(self, wakemsg, sender):
if not self.pending_commands:
return
if wakemsg.payload != self.command_num:
# This wakeup was from a different command; ignore it
return self._return_watched()
command = self.pending_commands[-1]
subp = getattr(self, 'p', None)
if not subp:
return # should never happen
if subp.poll() is not None:
return self._finished_command()
if getattr(self, 'tried_terminate', False):
subp.kill()
time.sleep(0.5) # Final wait for kill
if subp.poll() is None:
                self._log(command, 'error',
                          command.logtag + " Unable to stop PID %s", subp.pid)
return self._finished_command()
self.tried_terminate = True
subp.terminate()
self.wakeupAfter(timedelta(seconds=2))
return self._return_watched()
def receiveMsg_ActorExitRequest(self, exitmsg, sender):
subp = getattr(self, 'p', None)
if subp:
subp.terminate()
for each in self.pending_commands[:-1]:
self.send(each.sender, CommandResult(each, -3, '', '', None))
if subp and subp.poll() is None:
time.sleep(0.1)
subp.kill()
time.sleep(0.5)
            if subp.poll() is None and self.pending_commands:
                command = self.pending_commands[-1]
                self._log(command, 'error',
                          command.logtag + " Unable to cancel PID %s", subp.pid)
if self.pending_commands:
self._finished_command()
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Key range representation and splitting."""
import os
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
simplejson = None
from google.appengine.api import datastore
from google.appengine.api import namespace_manager
from google.appengine.datastore import datastore_pb
from google.appengine.ext import db
try:
from google.appengine.ext import ndb
except ImportError:
ndb = None
# It is acceptable to set key_range.ndb to the ndb module,
# imported through some other way (e.g. from the app dir).
class Error(Exception):
"""Base class for exceptions in this module."""
class KeyRangeError(Error):
"""Error while trying to generate a KeyRange."""
class SimplejsonUnavailableError(Error):
"""Error using json functionality with unavailable json and simplejson."""
def _IsNdbQuery(query):
return ndb is not None and isinstance(query, ndb.Query)
class KeyRange(object):
"""Represents a range of keys in the datastore.
A KeyRange object represents a key range
(key_start, include_start, key_end, include_end)
and a scan direction (KeyRange.DESC or KeyRange.ASC).
"""
DESC = "DESC"
ASC = "ASC"
def __init__(self,
key_start=None,
key_end=None,
direction=None,
include_start=True,
include_end=True,
namespace=None,
_app=None):
"""Initialize a KeyRange object.
Args:
key_start: The starting key for this range (db.Key or ndb.Key).
key_end: The ending key for this range (db.Key or ndb.Key).
direction: The direction of the query for this range.
include_start: Whether the start key should be included in the range.
include_end: Whether the end key should be included in the range.
namespace: The namespace for this range. If None then the current
namespace is used.
NOTE: If NDB keys are passed in, they are converted to db.Key
instances before being stored.
"""
if direction is None:
direction = KeyRange.ASC
assert direction in (KeyRange.ASC, KeyRange.DESC)
self.direction = direction
if ndb is not None:
if isinstance(key_start, ndb.Key):
key_start = key_start.to_old_key()
if isinstance(key_end, ndb.Key):
key_end = key_end.to_old_key()
self.key_start = key_start
self.key_end = key_end
self.include_start = include_start
self.include_end = include_end
if namespace is not None:
self.namespace = namespace
else:
self.namespace = namespace_manager.get_namespace()
self._app = _app
def __str__(self):
if self.include_start:
left_side = "["
else:
left_side = "("
if self.include_end:
right_side = "]"
else:
right_side = ")"
return "%s%s%r to %r%s" % (self.direction, left_side, self.key_start,
self.key_end, right_side)
def __repr__(self):
return ("key_range.KeyRange(key_start=%r,key_end=%r,direction=%r,"
"include_start=%r,include_end=%r, namespace=%r)") % (
self.key_start,
self.key_end,
self.direction,
self.include_start,
self.include_end,
self.namespace)
def advance(self, key):
"""Updates the start of the range immediately past the specified key.
Args:
key: A db.Key or ndb.Key.
"""
self.include_start = False
if ndb is not None:
if isinstance(key, ndb.Key):
key = key.to_old_key()
self.key_start = key
def filter_query(self, query, filters=None):
"""Add query filter to restrict to this key range.
Args:
query: A db.Query or ndb.Query instance.
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
The input query restricted to this key range.
"""
if ndb is not None:
if _IsNdbQuery(query):
return self.filter_ndb_query(query, filters=filters)
assert not _IsNdbQuery(query)
if filters:
for f in filters:
query.filter("%s %s" % (f[0], f[1]), f[2])
if self.include_start:
start_comparator = ">="
else:
start_comparator = ">"
if self.include_end:
end_comparator = "<="
else:
end_comparator = "<"
if self.key_start:
query.filter("__key__ %s" % start_comparator, self.key_start)
if self.key_end:
query.filter("__key__ %s" % end_comparator, self.key_end)
return query
def filter_ndb_query(self, query, filters=None):
"""Add query filter to restrict to this key range.
Args:
query: An ndb.Query instance.
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
The input query restricted to this key range.
"""
assert _IsNdbQuery(query)
if filters:
for f in filters:
query = query.filter(ndb.FilterNode(*f))
if self.include_start:
start_comparator = ">="
else:
start_comparator = ">"
if self.include_end:
end_comparator = "<="
else:
end_comparator = "<"
if self.key_start:
query = query.filter(ndb.FilterNode("__key__",
start_comparator,
self.key_start))
if self.key_end:
query = query.filter(ndb.FilterNode("__key__",
end_comparator,
self.key_end))
return query
def filter_datastore_query(self, query, filters=None):
"""Add query filter to restrict to this key range.
Args:
query: A datastore.Query instance.
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
The input query restricted to this key range.
"""
assert isinstance(query, datastore.Query)
if filters:
for f in filters:
query.update({"%s %s" % (f[0], f[1]): f[2]})
if self.include_start:
start_comparator = ">="
else:
start_comparator = ">"
if self.include_end:
end_comparator = "<="
else:
end_comparator = "<"
if self.key_start:
query.update({"__key__ %s" % start_comparator: self.key_start})
if self.key_end:
query.update({"__key__ %s" % end_comparator: self.key_end})
return query
def __get_direction(self, asc, desc):
"""Check that self.direction is in (KeyRange.ASC, KeyRange.DESC).
Args:
asc: Argument to return if self.direction is KeyRange.ASC
desc: Argument to return if self.direction is KeyRange.DESC
Returns:
asc or desc appropriately
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
if self.direction == KeyRange.ASC:
return asc
elif self.direction == KeyRange.DESC:
return desc
else:
raise KeyRangeError("KeyRange direction unexpected: %s", self.direction)
def make_directed_query(self, kind_class, keys_only=False):
"""Construct a query for this key range, including the scan direction.
Args:
kind_class: A kind implementation class (a subclass of either
db.Model or ndb.Model).
keys_only: bool, default False, use keys_only on Query?
Returns:
A db.Query or ndb.Query instance (corresponding to kind_class).
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
if ndb is not None:
if issubclass(kind_class, ndb.Model):
return self.make_directed_ndb_query(kind_class, keys_only=keys_only)
assert self._app is None, '_app is not supported for db.Query'
direction = self.__get_direction("", "-")
query = db.Query(kind_class, namespace=self.namespace, keys_only=keys_only)
query.order("%s__key__" % direction)
query = self.filter_query(query)
return query
def make_directed_ndb_query(self, kind_class, keys_only=False):
"""Construct an NDB query for this key range, including the scan direction.
Args:
kind_class: An ndb.Model subclass.
keys_only: bool, default False, use keys_only on Query?
Returns:
An ndb.Query instance.
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
assert issubclass(kind_class, ndb.Model)
if keys_only:
default_options = ndb.QueryOptions(keys_only=True)
else:
default_options = None
query = kind_class.query(app=self._app,
namespace=self.namespace,
default_options=default_options)
query = self.filter_ndb_query(query)
if self.__get_direction(True, False):
query = query.order(kind_class._key)
else:
query = query.order(-kind_class._key)
return query
def make_directed_datastore_query(self, kind, keys_only=False):
"""Construct a query for this key range, including the scan direction.
Args:
kind: A string.
keys_only: bool, default False, use keys_only on Query?
Returns:
A datastore.Query instance.
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
direction = self.__get_direction(datastore.Query.ASCENDING,
datastore.Query.DESCENDING)
query = datastore.Query(kind, _app=self._app, keys_only=keys_only)
query.Order(("__key__", direction))
query = self.filter_datastore_query(query)
return query
def make_ascending_query(self, kind_class, keys_only=False, filters=None):
"""Construct a query for this key range without setting the scan direction.
Args:
kind_class: A kind implementation class (a subclass of either
db.Model or ndb.Model).
keys_only: bool, default False, query only for keys.
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
A db.Query or ndb.Query instance (corresponding to kind_class).
"""
if ndb is not None:
if issubclass(kind_class, ndb.Model):
return self.make_ascending_ndb_query(
kind_class, keys_only=keys_only, filters=filters)
assert self._app is None, '_app is not supported for db.Query'
query = db.Query(kind_class, namespace=self.namespace, keys_only=keys_only)
query.order("__key__")
query = self.filter_query(query, filters=filters)
return query
def make_ascending_ndb_query(self, kind_class, keys_only=False, filters=None):
"""Construct an NDB query for this key range, without the scan direction.
Args:
kind_class: An ndb.Model subclass.
keys_only: bool, default False, query only for keys.
Returns:
An ndb.Query instance.
"""
assert issubclass(kind_class, ndb.Model)
if keys_only:
default_options = ndb.QueryOptions(keys_only=True)
else:
default_options = None
query = kind_class.query(app=self._app,
namespace=self.namespace,
default_options=default_options)
query = self.filter_ndb_query(query, filters=filters)
query = query.order(kind_class._key)
return query
def make_ascending_datastore_query(self, kind, keys_only=False, filters=None):
"""Construct a query for this key range without setting the scan direction.
Args:
kind: A string.
keys_only: bool, default False, use keys_only on Query?
filters: optional list of filters to apply to the query. Each filter is
a tuple: (<property_name_as_str>, <query_operation_as_str>, <value>).
User filters are applied first.
Returns:
A datastore.Query instance.
"""
query = datastore.Query(kind,
namespace=self.namespace,
_app=self._app,
keys_only=keys_only)
query.Order(("__key__", datastore.Query.ASCENDING))
query = self.filter_datastore_query(query, filters=filters)
return query
def split_range(self, batch_size=0):
"""Split this key range into a list of at most two ranges.
This method attempts to split the key range approximately in half.
Numeric ranges are split in the middle into two equal ranges and
string ranges are split lexicographically in the middle. If the
key range is smaller than batch_size it is left unsplit.
Note that splitting is done without knowledge of the distribution
of actual entities in the key range, so there is no guarantee (nor
any particular reason to believe) that the entities of the range
are evenly split.
Args:
batch_size: The maximum size of a key range that should not be split.
Returns:
A list of one or two key ranges covering the same space as this range.
"""
key_start = self.key_start
key_end = self.key_end
include_start = self.include_start
include_end = self.include_end
key_pairs = []
if not key_start:
key_pairs.append((key_start, include_start, key_end, include_end,
KeyRange.ASC))
elif not key_end:
key_pairs.append((key_start, include_start, key_end, include_end,
KeyRange.DESC))
else:
key_split = KeyRange.split_keys(key_start, key_end, batch_size)
first_include_end = True
if key_split == key_start:
first_include_end = first_include_end and include_start
key_pairs.append((key_start, include_start,
key_split, first_include_end,
KeyRange.DESC))
second_include_end = include_end
if key_split == key_end:
second_include_end = False
key_pairs.append((key_split, False,
key_end, second_include_end,
KeyRange.ASC))
ranges = [KeyRange(key_start=start,
include_start=include_start,
key_end=end,
include_end=include_end,
direction=direction,
namespace=self.namespace,
_app=self._app)
for (start, include_start, end, include_end, direction)
in key_pairs]
return ranges
def __hash__(self):
raise TypeError('KeyRange is unhashable')
def __cmp__(self, other):
"""Compare two key ranges.
    Key ranges with a value of None for key_start or key_end are always
considered to have include_start=False or include_end=False, respectively,
when comparing. Since None indicates an unbounded side of the range,
the include specifier is meaningless. The ordering generated is total
but somewhat arbitrary.
Args:
other: An object to compare to this one.
Returns:
-1: if this key range is less than other.
0: if this key range is equal to other.
1: if this key range is greater than other.
"""
if not isinstance(other, KeyRange):
return 1
self_list = [self.key_start, self.key_end, self.direction,
self.include_start, self.include_end, self._app,
self.namespace]
if not self.key_start:
self_list[3] = False
if not self.key_end:
self_list[4] = False
other_list = [other.key_start,
other.key_end,
other.direction,
other.include_start,
other.include_end,
other._app,
other.namespace]
if not other.key_start:
other_list[3] = False
if not other.key_end:
other_list[4] = False
return cmp(self_list, other_list)
@staticmethod
def bisect_string_range(start, end):
"""Returns a string that is approximately in the middle of the range.
(start, end) is treated as a string range, and it is assumed
start <= end in the usual lexicographic string ordering. The output key
mid is guaranteed to satisfy start <= mid <= end.
The method proceeds by comparing initial characters of start and
end. When the characters are equal, they are appended to the mid
string. In the first place that the characters differ, the
difference characters are averaged and this average is appended to
    the mid string. If averaging resulted in rounding down, an
additional character is added to the mid string to make up for the
rounding down. This extra step is necessary for correctness in
the case that the average of the two characters is equal to the
character in the start string.
This method makes the assumption that most keys are ascii and it
attempts to perform splitting within the ascii range when that
results in a valid split.
Args:
start: A string.
end: A string such that start <= end.
Returns:
A string mid such that start <= mid <= end.
"""
if start == end:
return start
start += "\0"
end += "\0"
midpoint = []
expected_max = 127
for i in xrange(min(len(start), len(end))):
if start[i] == end[i]:
midpoint.append(start[i])
else:
ord_sum = ord(start[i]) + ord(end[i])
midpoint.append(unichr(ord_sum / 2))
if ord_sum % 2:
if len(start) > i + 1:
ord_start = ord(start[i+1])
else:
ord_start = 0
if ord_start < expected_max:
ord_split = (expected_max + ord_start) / 2
else:
ord_split = (0xFFFF + ord_start) / 2
midpoint.append(unichr(ord_split))
break
return "".join(midpoint)
@staticmethod
def split_keys(key_start, key_end, batch_size):
"""Return a key that is between key_start and key_end inclusive.
This method compares components of the ancestor paths of key_start
and key_end. The first place in the path that differs is
approximately split in half. If the kind components differ, a new
non-existent kind halfway between the two is used to split the
space. If the id_or_name components differ, then a new id_or_name
that is halfway between the two is selected. If the lower
id_or_name is numeric and the upper id_or_name is a string, then
    the minimum string key u'\0' is used as the split id_or_name. The
key that is returned is the shared portion of the ancestor path
followed by the generated split component.
Args:
key_start: A db.Key or ndb.Key instance for the lower end of a range.
key_end: A db.Key or ndb.Key instance for the upper end of a range.
batch_size: The maximum size of a range that should not be split.
Returns:
A db.Key instance, k, such that key_start <= k <= key_end.
NOTE: Even though ndb.Key instances are accepted as arguments,
the return value is always a db.Key instance.
"""
if ndb is not None:
if isinstance(key_start, ndb.Key):
key_start = key_start.to_old_key()
if isinstance(key_end, ndb.Key):
key_end = key_end.to_old_key()
assert key_start.app() == key_end.app()
assert key_start.namespace() == key_end.namespace()
path1 = key_start.to_path()
path2 = key_end.to_path()
len1 = len(path1)
len2 = len(path2)
assert len1 % 2 == 0
assert len2 % 2 == 0
out_path = []
min_path_len = min(len1, len2) / 2
for i in xrange(min_path_len):
kind1 = path1[2*i]
kind2 = path2[2*i]
if kind1 != kind2:
split_kind = KeyRange.bisect_string_range(kind1, kind2)
out_path.append(split_kind)
out_path.append(unichr(0))
break
last = (len1 == len2 == 2*(i + 1))
id_or_name1 = path1[2*i + 1]
id_or_name2 = path2[2*i + 1]
id_or_name_split = KeyRange._split_id_or_name(
id_or_name1, id_or_name2, batch_size, last)
if id_or_name1 == id_or_name_split:
out_path.append(kind1)
out_path.append(id_or_name1)
else:
out_path.append(kind1)
out_path.append(id_or_name_split)
break
return db.Key.from_path(
*out_path,
**{"_app": key_start.app(), "namespace": key_start.namespace()})
@staticmethod
def _split_id_or_name(id_or_name1, id_or_name2, batch_size, maintain_batches):
    Return an id_or_name that is between id_or_name1 and id_or_name2.
Attempts to split the range [id_or_name1, id_or_name2] in half,
unless maintain_batches is true and the size of the range
[id_or_name1, id_or_name2] is less than or equal to batch_size.
Args:
id_or_name1: A number or string or the id_or_name component of a key
id_or_name2: A number or string or the id_or_name component of a key
batch_size: The range size that will not be split if maintain_batches
is true.
maintain_batches: A boolean for whether to keep small ranges intact.
Returns:
An id_or_name such that id_or_name1 <= id_or_name <= id_or_name2.
"""
if (isinstance(id_or_name1, (int, long)) and
isinstance(id_or_name2, (int, long))):
if not maintain_batches or id_or_name2 - id_or_name1 > batch_size:
return (id_or_name1 + id_or_name2) / 2
else:
return id_or_name1
elif (isinstance(id_or_name1, basestring) and
isinstance(id_or_name2, basestring)):
return KeyRange.bisect_string_range(id_or_name1, id_or_name2)
else:
if (not isinstance(id_or_name1, (int, long)) or
not isinstance(id_or_name2, basestring)):
raise KeyRangeError("Wrong key order: %r, %r" %
(id_or_name1, id_or_name2))
zero_ch = unichr(0)
if id_or_name2 == zero_ch:
return (id_or_name1 + 2**63 - 1) / 2
return zero_ch
@staticmethod
def guess_end_key(kind,
key_start,
probe_count=30,
split_rate=5):
"""Guess the end of a key range with a binary search of probe queries.
When the 'key_start' parameter has a key hierarchy, this function will
only determine the key range for keys in a similar hierarchy. That means
if the keys are in the form:
kind=Foo, name=bar/kind=Stuff, name=meep
only this range will be probed:
kind=Foo, name=*/kind=Stuff, name=*
That means other entities of kind 'Stuff' that are children of another
parent entity kind will be skipped:
kind=Other, name=cookie/kind=Stuff, name=meep
Args:
key_start: The starting key of the search range. In most cases this
should be id = 0 or name = '\0'. May be db.Key or ndb.Key.
kind: String name of the entity kind.
probe_count: Optional, how many probe queries to run.
split_rate: Exponential rate to use for splitting the range on the
way down from the full key space. For smaller ranges this should
be higher so more of the keyspace is skipped on initial descent.
Returns:
db.Key that is guaranteed to be as high or higher than the
highest key existing for this Kind. Doing a query between 'key_start' and
this returned Key (inclusive) will contain all entities of this Kind.
NOTE: Even though an ndb.Key instance is accepted as argument,
the return value is always a db.Key instance.
"""
if ndb is not None:
if isinstance(key_start, ndb.Key):
key_start = key_start.to_old_key()
app = key_start.app()
namespace = key_start.namespace()
full_path = key_start.to_path()
for index, piece in enumerate(full_path):
if index % 2 == 0:
continue
elif isinstance(piece, basestring):
full_path[index] = u"\xffff"
else:
full_path[index] = 2**63 - 1
key_end = db.Key.from_path(*full_path,
**{"_app": app, "namespace": namespace})
split_key = key_end
for i in xrange(probe_count):
for j in xrange(split_rate):
split_key = KeyRange.split_keys(key_start, split_key, 1)
results = datastore.Query(
kind,
{"__key__ >": split_key},
namespace=namespace,
_app=app,
keys_only=True).Get(1)
if results:
if results[0].name() and not key_start.name():
return KeyRange.guess_end_key(
kind, results[0], probe_count - 1, split_rate)
else:
split_rate = 1
key_start = results[0]
split_key = key_end
else:
key_end = split_key
return key_end
def to_json(self):
"""Serialize KeyRange to json.
Returns:
string with KeyRange json representation.
"""
if simplejson is None:
raise SimplejsonUnavailableError(
"JSON functionality requires json or simplejson to be available")
def key_to_str(key):
if key:
return str(key)
else:
return None
obj_dict = {
"direction": self.direction,
"key_start": key_to_str(self.key_start),
"key_end": key_to_str(self.key_end),
"include_start": self.include_start,
"include_end": self.include_end,
"namespace": self.namespace,
}
if self._app:
obj_dict["_app"] = self._app
return simplejson.dumps(obj_dict, sort_keys=True)
@staticmethod
def from_json(json_str):
"""Deserialize KeyRange from its json representation.
Args:
json_str: string with json representation created by key_range_to_json.
Returns:
deserialized KeyRange instance.
"""
if simplejson is None:
raise SimplejsonUnavailableError(
"JSON functionality requires json or simplejson to be available")
def key_from_str(key_str):
if key_str:
return db.Key(key_str)
else:
return None
json = simplejson.loads(json_str)
return KeyRange(key_from_str(json["key_start"]),
key_from_str(json["key_end"]),
json["direction"],
json["include_start"],
json["include_end"],
json.get("namespace"),
_app=json.get("_app"))
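# --- Illustrative sketch (editor addition, not part of the original module) ---
# Only exercises the pure splitting helpers; even importing this module
# assumes the App Engine SDK is on sys.path.
if __name__ == "__main__":
  # "bar" and "foo" first differ at 'b' vs 'f', whose average is 'd'.
  print(KeyRange.bisect_string_range("bar", "foo"))
  # Integer ids are averaged when the range exceeds the batch size.
  print(KeyRange._split_id_or_name(1, 100, batch_size=0, maintain_batches=False))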
|
|
from __future__ import unicode_literals
import copy
import datetime
from django.db import models
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from audit_log.models.fields import LastUserField
try:
from django.utils.timezone import now as datetime_now
assert datetime_now
except ImportError:
import datetime
datetime_now = datetime.datetime.now
class LogEntryObjectDescriptor(object):
def __init__(self, model):
self.model = model
def __get__(self, instance, owner):
kwargs = dict((f.attname, getattr(instance, f.attname))
for f in self.model._meta.fields
if hasattr(instance, f.attname))
return self.model(**kwargs)
class AuditLogManager(models.Manager):
def __init__(self, model, attname, instance = None, ):
super(AuditLogManager, self).__init__()
self.model = model
self.instance = instance
self.attname = attname
        #set a hidden attribute on the instance to control whether we should track changes
if instance is not None and not hasattr(instance, '__is_%s_enabled'%attname):
setattr(instance, '__is_%s_enabled'%attname, True)
def enable_tracking(self):
if self.instance is None:
raise ValueError("Tracking can only be enabled or disabled "
"per model instance, not on a model class")
setattr(self.instance, '__is_%s_enabled'%self.attname, True)
def disable_tracking(self):
if self.instance is None:
raise ValueError("Tracking can only be enabled or disabled "
"per model instance, not on a model class")
setattr(self.instance, '__is_%s_enabled'%self.attname, False)
def is_tracking_enabled(self):
if getattr(settings, 'DISABLE_AUDIT_LOG', False):
return False
if self.instance is None:
raise ValueError("Tracking can only be enabled or disabled "
"per model instance, not on a model class")
return getattr(self.instance, '__is_%s_enabled'%self.attname)
def get_queryset(self):
if self.instance is None:
return super(AuditLogManager, self).get_queryset()
f = {self.instance._meta.pk.name : self.instance.pk}
return super(AuditLogManager, self).get_queryset().filter(**f)
class AuditLogDescriptor(object):
def __init__(self, model, manager_class, attname):
self.model = model
self.manager_class = manager_class
self.attname = attname
def __get__(self, instance, owner):
if instance is None:
return self.manager_class(self.model, self.attname)
return self.manager_class(self.model, self.attname, instance)
class AuditLog(object):
manager_class = AuditLogManager
def __init__(self, exclude = []):
self._exclude = exclude
def contribute_to_class(self, cls, name):
self.manager_name = name
models.signals.class_prepared.connect(self.finalize, sender = cls)
def create_log_entry(self, instance, action_type):
manager = getattr(instance, self.manager_name)
attrs = {}
for field in instance._meta.fields:
if field.attname not in self._exclude:
attrs[field.attname] = getattr(instance, field.attname)
manager.create(action_type = action_type, **attrs)
def post_save(self, instance, created, **kwargs):
#ignore if it is disabled
if getattr(instance, self.manager_name).is_tracking_enabled():
self.create_log_entry(instance, created and 'I' or 'U')
def post_delete(self, instance, **kwargs):
#ignore if it is disabled
if getattr(instance, self.manager_name).is_tracking_enabled():
self.create_log_entry(instance, 'D')
def finalize(self, sender, **kwargs):
log_entry_model = self.create_log_entry_model(sender)
models.signals.post_save.connect(self.post_save, sender = sender, weak = False)
models.signals.post_delete.connect(self.post_delete, sender = sender, weak = False)
descriptor = AuditLogDescriptor(log_entry_model, self.manager_class, self.manager_name)
setattr(sender, self.manager_name, descriptor)
def copy_fields(self, model):
"""
Creates copies of the fields we are keeping
track of for the provided model, returning a
dictionary mapping field name to a copied field object.
"""
fields = {'__module__' : model.__module__}
for field in model._meta.fields:
if not field.name in self._exclude:
field = copy.deepcopy(field)
if isinstance(field, models.AutoField):
#we replace the AutoField of the original model
#with an IntegerField because a model can
#have only one autofield.
field.__class__ = models.IntegerField
if field.primary_key:
field.serialize = True
#OneToOne fields should really be tracked
#as ForeignKey fields
if isinstance(field, models.OneToOneField):
field.__class__ = models.ForeignKey
if field.primary_key or field.unique:
#unique fields of the original model
#can not be guaranteed to be unique
#in the audit log entry but they
#should still be indexed for faster lookups.
field.primary_key = False
field._unique = False
field.db_index = True
if field.rel and field.rel.related_name:
field.rel.related_name = '_auditlog_%s' % field.rel.related_name
fields[field.name] = field
return fields
def get_logging_fields(self, model):
"""
Returns a dictionary mapping of the fields that are used for
        keeping the actual audit log entries.
"""
rel_name = '_%s_audit_log_entry'%model._meta.object_name.lower()
def entry_instance_to_unicode(log_entry):
try:
result = '%s: %s %s at %s'%(model._meta.object_name,
log_entry.object_state,
log_entry.get_action_type_display().lower(),
log_entry.action_date,
)
except AttributeError:
result = '%s %s at %s'%(model._meta.object_name,
log_entry.get_action_type_display().lower(),
log_entry.action_date
)
return result
action_user_field = LastUserField(related_name = rel_name, editable = False)
#check if the manager has been attached to auth user model
if [model._meta.app_label, model.__name__] == getattr(settings, 'AUTH_USER_MODEL', 'auth.User').split("."):
action_user_field = LastUserField(related_name = rel_name, editable = False, to = 'self')
return {
'action_id' : models.AutoField(primary_key = True),
'action_date' : models.DateTimeField(default = datetime_now, editable = False, blank=False),
'action_user' : action_user_field,
'action_type' : models.CharField(max_length = 1, editable = False, choices = (
('I', _('Created')),
('U', _('Changed')),
('D', _('Deleted')),
)),
'object_state' : LogEntryObjectDescriptor(model),
'__unicode__' : entry_instance_to_unicode,
}
def get_meta_options(self, model):
"""
Returns a dictionary of Meta options for the
        audit log model.
"""
def convert(name):
import re
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
db_table = ''
if model._meta.app_label:
db_table = model._meta.app_label + '_'
db_table += '%s_history' % convert(model._meta.object_name)
result = {
'ordering' : ('-action_date',),
'app_label' : model._meta.app_label,
'db_table' : db_table,
}
from django.db.models.options import DEFAULT_NAMES
if 'default_permissions' in DEFAULT_NAMES:
result.update({'default_permissions': ()})
return result
def create_log_entry_model(self, model):
"""
Creates a log entry model that will be associated with
the model provided.
"""
attrs = self.copy_fields(model)
attrs.update(self.get_logging_fields(model))
attrs.update(Meta = type(str('Meta'), (), self.get_meta_options(model)))
name = str('%sHistory'%model._meta.object_name)
return type(name, (models.Model,), attrs)
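# --- Illustrative usage (editor addition; model and field names are assumptions) ---
# Attaching an ``AuditLog`` instance to a model creates a parallel
# ``<Model>History`` model (table ``<app>_<model>_history``) plus a per-instance
# manager over its entries:
#
#     class Product(models.Model):
#         name = models.CharField(max_length=100)
#         audit_log = AuditLog()
#
#     product = Product.objects.create(name='Widget')
#     product.audit_log.all()               # one 'I' (Created) entry
#     product.audit_log.disable_tracking()  # pause logging for this instance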
|
|
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import permission_required
from django.contrib.contenttypes.models import ContentType
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.template.loader import render_to_string
from django.utils import timezone
from django.views.generic.edit import UpdateView
from html.parser import HTMLParser
import logging
from smtplib import SMTPRecipientsRefused
import hashlib
import os
import html2text
from janeus import Janeus
from . import models
from . import forms
try:
from mezzanine.utils.sites import current_site_id
except ImportError:
from .siterelated import current_site_id
logger = logging.getLogger(__name__)
def view_home(request):
if getattr(settings, 'RECAPTCHA', False):
if request.method == 'POST':
form = forms.SubscriptionEmailRecaptchaForm(request.POST)
else:
form = forms.SubscriptionEmailRecaptchaForm()
else:
if request.method == 'POST':
form = forms.SubscriptionEmailForm(request.POST)
else:
form = forms.SubscriptionEmailForm()
if request.method == 'POST':
if form.is_valid():
email = form.cleaned_data['email']
if getattr(settings, 'SKIP_EMAIL', False):
email_to_send, attachments = compose_mail(email, False, request=request)
return HttpResponse(email_to_send, content_type='text/html')
else:
email_to_send, attachments = compose_mail(email, True, request=request)
send_an_email(email, email_to_send, attachments)
return render(request, 'hemres/subscriptions_emailsent.html', {'email': email})
return render(request, 'hemres/home.html', {'form': form})
def subscriptions_done(request):
return render(request, 'hemres/subscriptions_manage_done.html')
def unsubscribe_landing(request, token):
sub = get_object_or_404(models.Subscriber, unsubscribe_token=token)
send_token_url = request.build_absolute_uri(reverse(unsubscribe_sendmail, kwargs={'token': token}))
for_real_url = request.build_absolute_uri(reverse(unsubscribe_unsub, kwargs={'token': token}))
return render(request, 'hemres/unsubscribe_landing.html',
{'send_token_url': send_token_url, 'for_real_url': for_real_url, 'name': sub.name})
def compose_unsubscribed_mail(request, name, embed):
home_url = request.build_absolute_uri(reverse(view_home))
context = {'home_url': home_url,
'render_mail': embed,
'attachments': {},
'name': name}
result = render_to_string('hemres/unsubscribe_email.html', context)
return result, [mime for mime, cid in list(context['attachments'].values())]
def unsubscribe_sendmail(request, token):
sub = get_object_or_404(models.Subscriber, unsubscribe_token=token).cast()
if type(sub) is models.EmailSubscriber:
email = sub.email
elif type(sub) is models.JaneusSubscriber:
if not hasattr(settings, 'JANEUS_SERVER'):
raise Http404()
res = Janeus().by_lidnummer(sub.member_id)
if res is None:
raise Http404()
dn, attrs = res
email = attrs['mail'][0]
if isinstance(email, bytes):
email = str(email, 'utf-8')
else:
raise Http404()
if getattr(settings, 'SKIP_EMAIL', False):
email_to_send, attachments = compose_mail(email, False, request=request)
return HttpResponse(email_to_send, content_type='text/html')
else:
email_to_send, attachments = compose_mail(email, True, request=request)
send_an_email(email, email_to_send, attachments)
return render(request, 'hemres/subscriptions_emailsent.html', {'email': email})
def unsubscribe_unsub(request, token):
sub = get_object_or_404(models.Subscriber, unsubscribe_token=token).cast()
# get data to send email after
if type(sub) is models.EmailSubscriber:
name = sub.name
email = sub.email
elif type(sub) is models.JaneusSubscriber:
if not hasattr(settings, 'JANEUS_SERVER'):
sub.delete()
raise Http404()
res = Janeus().by_lidnummer(sub.member_id)
if res is None:
sub.delete()
raise Http404()
dn, attrs = res
name = sub.name
email = attrs['mail'][0]
# now delete the subscriber
sub.delete()
# now send an email
if getattr(settings, 'SKIP_EMAIL', False):
email_to_send, attachments = compose_unsubscribed_mail(request, name, False)
return HttpResponse(email_to_send, content_type='text/html')
else:
email_to_send, attachments = compose_unsubscribed_mail(request, name, True)
send_an_email(email, email_to_send, attachments)
return render(request, 'hemres/subscriptions_manage_done.html')
class ManageEmailSubscriptions(UpdateView):
model = models.EmailSubscriber
form_class = forms.EmailSubscriberForm
template_name = 'hemres/subscriptions_manage_email.html'
def get_success_url(self):
return reverse(subscriptions_done)
def get_object(self, *args, **kwargs):
subscriber = self.kwargs['subscriber']
token = self.kwargs['token']
accesstoken = models.EmailSubscriberAccessToken.objects.filter(pk=int(subscriber)).filter(token=token).filter(expiration_date__gt=timezone.now())
# check expire
if len(accesstoken) == 0:
raise Http404()
return accesstoken[0].subscriber
class ManageJaneusSubscriptions(UpdateView):
model = models.JaneusSubscriber
form_class = forms.JaneusSubscriberForm
template_name = 'hemres/subscriptions_manage_janeus.html'
def get_success_url(self):
return reverse(subscriptions_done)
def get_object(self, *args, **kwargs):
subscriber = self.kwargs['subscriber']
token = self.kwargs['token']
accesstoken = models.JaneusSubscriberAccessToken.objects.filter(pk=int(subscriber)).filter(token=token).filter(expiration_date__gt=timezone.now())
# check expire
if len(accesstoken) == 0:
raise Http404()
accesstoken[0].subscriber.update_janeus_newsletters()
return accesstoken[0].subscriber
def make_janeus_subscriber(members):
member_id, name = members
s = models.JaneusSubscriber.objects.filter(member_id=int(member_id)).select_related('token')
if len(s) == 0:
s = [models.JaneusSubscriber(member_id=int(member_id), janeus_name=name, name=name)]
s[0].save()
return s[0]
def create_fresh_janeus_token(subscriber):
if hasattr(subscriber, 'token'):
subscriber.token.delete()
token = hashlib.sha256(os.urandom(64)).hexdigest()
t = models.JaneusSubscriberAccessToken(subscriber=subscriber, token=token)
t.save()
return t
def create_fresh_email_token(subscriber):
if hasattr(subscriber, 'token'):
subscriber.token.delete()
token = hashlib.sha256(os.urandom(64)).hexdigest()
t = models.EmailSubscriberAccessToken(subscriber=subscriber, token=token)
t.save()
return t
def send_an_email(emailaddress, html_content, attachments):
h = html2text.HTML2Text()
h.ignore_images = True
text_content = h.handle(html_content)
subject = 'Jonge Democraten Nieuwsbrieven'
from_email = getattr(settings, 'HEMRES_FROM_ADDRESS', '[email protected]')
msg = EmailMultiAlternatives(subject=subject, body=text_content, from_email=from_email, to=[emailaddress])
msg.attach_alternative(html_content, "text/html")
msg.mixed_subtype = 'related'
for a in attachments:
msg.attach(a)
try:
msg.send()
except SMTPRecipientsRefused:
pass
@transaction.atomic
def compose_mail(emailaddress, embed, request):
# find Janeus users
if hasattr(settings, 'JANEUS_SERVER'):
janeus_subscribers = [make_janeus_subscriber(s) for s in Janeus().lidnummers(emailaddress)]
else:
janeus_subscribers = []
email_subscribers = models.EmailSubscriber.objects.filter(email=emailaddress).select_related('token') # case sensitive!
if len(janeus_subscribers) == 0 and len(email_subscribers) == 0:
email_subscribers = [models.EmailSubscriber(name='', email=emailaddress)]
email_subscribers[0].save()
# create tokens
janeus_subscribers_tokens = [create_fresh_janeus_token(s) for s in janeus_subscribers]
email_subscribers_tokens = [create_fresh_email_token(s) for s in email_subscribers]
if len(janeus_subscribers) == 1 and len(email_subscribers) == 0:
name = janeus_subscribers[0].name
else:
name = None
absolute_uri = '%s://%s' % (request.scheme, request.get_host())
context = {'janeus_subscriber_tokens': janeus_subscribers_tokens,
'email_subscriber_tokens': email_subscribers_tokens,
'attachments': {},
'render_mail': embed,
'absolute_uri': absolute_uri,
'name': name}
result = render_to_string('hemres/subscriptions_email.html', context)
return result, [mime for mime, cid in list(context['attachments'].values())]
def view_newsletter(request, newsletter_pk):
if request.user.is_active and request.user.is_staff:
newsletter = get_object_or_404(models.Newsletter, pk=newsletter_pk)
else:
# all newsletters SENT TO A LIST at most a year ago
yearago = datetime.now() - timedelta(days=365)
newsletter = get_object_or_404(models.Newsletter.objects.filter(public=True).filter(newslettertolist__date__gt=yearago), pk=newsletter_pk)
subscriptions_url = request.build_absolute_uri(reverse(view_home))
email, attachments = newsletter.render('Naam', False, subscriptions_url)
return HttpResponse(email, content_type="text/html")
@staff_member_required
def test_newsletter(request, pk):
newsletter = get_object_or_404(models.Newsletter, pk=pk)
if request.method == 'POST':
form = forms.TestEmailForm(request.POST)
else:
form = forms.TestEmailForm()
if request.method == 'POST':
if form.is_valid():
address = form.cleaned_data['email']
subscriptions_url = request.build_absolute_uri(reverse(view_home))
subject = "[Test] {}".format(newsletter.subject)
html_content, attachments = newsletter.render('', True, subscriptions_url)
h = html2text.HTML2Text()
h.ignore_images = True
text_content = h.handle(html_content)
from_email = getattr(settings, 'HEMRES_FROM_ADDRESS', '[email protected]')
msg = EmailMultiAlternatives(subject=subject, body=text_content, from_email=from_email, to=[address])
msg.attach_alternative(html_content, "text/html")
msg.mixed_subtype = 'related'
for a in attachments:
msg.attach(a)
if getattr(settings, 'SKIP_EMAIL', False):
return HttpResponse(msg.message().as_string(), content_type="message")
else:
msg.send()
content_type = ContentType.objects.get_for_model(newsletter.__class__)
return redirect(reverse('admin:%s_%s_changelist' % (content_type.app_label, content_type.model)))
return render(request, 'hemres/test_newsletter.html', {'form': form, 'nieuwsbrief': str(newsletter)})
@staff_member_required
@permission_required('hemres.add_newsletter')
def prepare_sending(request, pk):
newsletter = get_object_or_404(models.Newsletter, pk=pk)
if request.method == 'POST':
form = forms.PrepareSendingForm(request.POST)
else:
form = forms.PrepareSendingForm()
if request.method == 'POST':
if form.is_valid():
subscriptions_url = request.build_absolute_uri(reverse(unsubscribe_landing, kwargs={'token': 'DUMMYTOKEN'}))
newsletter.prepare_sending(form.cleaned_data['lists'], subscriptions_url)
content_type = ContentType.objects.get_for_model(newsletter.__class__)
return redirect(reverse('admin:%s_%s_changelist' % (content_type.app_label, content_type.model)))
return render(request, 'hemres/prepare_sending.html', {'form': form, 'nieuwsbrief': str(newsletter)})
@staff_member_required
@permission_required('hemres.add_newslettertosubscriber')
def process_sending(request, pk):
newsletter_to_list = get_object_or_404(models.NewsletterToList, pk=pk)
newsletter_to_list.process()
content_type = ContentType.objects.get_for_model(models.NewsletterToList)
return redirect(reverse('admin:%s_%s_changelist' % (content_type.app_label, content_type.model)))
def list_all(request):
# Find all Newsletters of the current site
site_id = current_site_id()
if request.user.is_active and request.user.is_staff:
letters = models.Newsletter.objects.filter(site__id__exact=site_id)
        letters = models.NewsletterToList.objects.order_by('-date').values(
            'target_list__name', 'newsletter_id', 'newsletter__subject', 'date',
        ).filter(newsletter__in=letters)
else:
yearago = datetime.now() - timedelta(days=365)
letters = models.Newsletter.objects.filter(site__id__exact=site_id)
        letters = models.NewsletterToList.objects.filter(
            target_list__janeus_groups_required='',
            newsletter__public=True,
            date__gt=yearago,
        ).order_by('-date').values(
            'target_list__name', 'newsletter_id', 'newsletter__subject', 'date',
        ).filter(newsletter__in=letters)
letters = [{'id': s['newsletter_id'], 'subject': '[{}] {}'.format(s['target_list__name'], s['newsletter__subject']), 'date': s['date']} for s in letters]
return render(request, 'hemres/list.html', {'letters': letters})
class CSSExtract(HTMLParser):
style = False
data = ""
def handle_starttag(self, tag, attrs):
self.style = tag == "style"
def handle_endtag(self, tag):
self.style = False
def handle_data(self, data):
if self.style:
self.data += data
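# --- Illustrative usage of CSSExtract (editor addition) ---
#     parser = CSSExtract()
#     parser.feed('<html><style>p { color: red; }</style><p>Hi</p></html>')
#     parser.data  # -> 'p { color: red; }'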
@staff_member_required
def get_css(request, pk):
newsletter = get_object_or_404(models.Newsletter, pk=pk)
parser = CSSExtract()
parser.feed(newsletter.template)
return HttpResponse(parser.data, content_type="text/css")
|
|
import numpy as np
import matplotlib.pyplot as pl
from matplotlib import rcParams
from scvelo.tools.utils import groups_to_bool
from scvelo.tools.velocity_embedding import velocity_embedding
from .docs import doc_params, doc_scatter
from .scatter import scatter
from .utils import (
default_basis,
default_color,
default_size,
get_ax,
get_basis,
get_components,
get_figure_params,
make_unique_list,
savefig_or_show,
velocity_embedding_changed,
)
from .velocity_embedding_grid import compute_velocity_on_grid
@doc_params(scatter=doc_scatter)
def velocity_embedding_stream(
adata,
basis=None,
vkey="velocity",
density=2,
smooth=None,
min_mass=None,
cutoff_perc=None,
arrow_color=None,
arrow_size=1,
arrow_style="-|>",
max_length=4,
integration_direction="both",
linewidth=None,
n_neighbors=None,
recompute=None,
color=None,
use_raw=None,
layer=None,
color_map=None,
colorbar=True,
palette=None,
size=None,
alpha=0.3,
perc=None,
X=None,
V=None,
X_grid=None,
V_grid=None,
sort_order=True,
groups=None,
components=None,
legend_loc="on data",
legend_fontsize=None,
legend_fontweight=None,
xlabel=None,
ylabel=None,
title=None,
fontsize=None,
figsize=None,
dpi=None,
frameon=None,
show=None,
save=None,
ax=None,
ncols=None,
**kwargs,
):
"""\
Stream plot of velocities on the embedding.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
density: `float` (default: 2)
Controls the closeness of streamlines. When density = 2 (default), the domain
is divided into a 60x60 grid, whereas density linearly scales this grid.
Each cell in the grid can have, at most, one traversing streamline.
For different densities in each direction, use a tuple (density_x, density_y).
smooth: `float` (default: 0.5)
Multiplication factor for scale in Gaussian kernel around grid point.
min_mass: `float` (default: 1)
Minimum threshold for mass to be shown.
It can range between 0 (all velocities) and 5 (large velocities only).
cutoff_perc: `float` (default: `None`)
If set, mask small velocities below a percentile threshold (between 0 and 100).
linewidth: `float` (default: 1)
Line width for streamplot.
arrow_color: `str` or 2D array (default: 'k')
The streamline color. If given an array, it must have the same shape as u and v.
arrow_size: `float` (default: 1)
Scaling factor for the arrow size.
arrow_style: `str` (default: '-|>')
Arrow style specification, '-|>' or '->'.
max_length: `float` (default: 4)
Maximum length of streamline in axes coordinates.
integration_direction: `str` (default: 'both')
Integrate the streamline in 'forward', 'backward' or 'both' directions.
n_neighbors: `int` (default: None)
Number of neighbors to consider around grid point.
X: `np.ndarray` (default: None)
Embedding coordinates. Using `adata.obsm['X_umap']` per default.
V: `np.ndarray` (default: None)
Embedding velocity coordinates. Using `adata.obsm['velocity_umap']` per default.
{scatter}
Returns
-------
`matplotlib.Axis` if `show==False`
"""
basis = default_basis(adata, **kwargs) if basis is None else get_basis(adata, basis)
if vkey == "all":
lkeys = list(adata.layers.keys())
vkey = [key for key in lkeys if "velocity" in key and "_u" not in key]
color, color_map = kwargs.pop("c", color), kwargs.pop("cmap", color_map)
colors = make_unique_list(color, allow_array=True)
layers, vkeys = make_unique_list(layer), make_unique_list(vkey)
if V is None:
for key in vkeys:
if recompute or velocity_embedding_changed(adata, basis=basis, vkey=key):
velocity_embedding(adata, basis=basis, vkey=key)
color, layer, vkey = colors[0], layers[0], vkeys[0]
color = default_color(adata) if color is None else color
if X_grid is None or V_grid is None:
_adata = (
adata[groups_to_bool(adata, groups, groupby=color)]
if groups is not None and color in adata.obs.keys()
else adata
)
comps, obsm = get_components(components, basis), _adata.obsm
X_emb = np.array(obsm[f"X_{basis}"][:, comps]) if X is None else X[:, :2]
V_emb = np.array(obsm[f"{vkey}_{basis}"][:, comps]) if V is None else V[:, :2]
X_grid, V_grid = compute_velocity_on_grid(
X_emb=X_emb,
V_emb=V_emb,
density=1,
smooth=smooth,
min_mass=min_mass,
n_neighbors=n_neighbors,
autoscale=False,
adjust_for_stream=True,
cutoff_perc=cutoff_perc,
)
lengths = np.sqrt((V_grid ** 2).sum(0))
linewidth = 1 if linewidth is None else linewidth
linewidth *= 2 * lengths / lengths[~np.isnan(lengths)].max()
scatter_kwargs = {
"basis": basis,
"perc": perc,
"use_raw": use_raw,
"sort_order": sort_order,
"alpha": alpha,
"components": components,
"legend_loc": legend_loc,
"groups": groups,
"legend_fontsize": legend_fontsize,
"legend_fontweight": legend_fontweight,
"palette": palette,
"color_map": color_map,
"frameon": frameon,
"xlabel": xlabel,
"ylabel": ylabel,
"colorbar": colorbar,
"dpi": dpi,
"fontsize": fontsize,
"show": False,
"save": False,
}
stream_kwargs = {
"linewidth": linewidth,
"density": density or 2,
"zorder": 3,
"arrow_color": arrow_color or "k",
"arrowsize": arrow_size or 1,
"arrowstyle": arrow_style or "-|>",
"maxlength": max_length or 4,
"integration_direction": integration_direction or "both",
}
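    # If several colors, layers or velocity keys were requested, recurse once
    # per key and lay the resulting panels out on a grid of subplots.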
multikey = (
colors
if len(colors) > 1
else layers
if len(layers) > 1
else vkeys
if len(vkeys) > 1
else None
)
if multikey is not None:
if title is None:
title = list(multikey)
elif isinstance(title, (list, tuple)):
title *= int(np.ceil(len(multikey) / len(title)))
ncols = len(multikey) if ncols is None else min(len(multikey), ncols)
nrows = int(np.ceil(len(multikey) / ncols))
figsize = rcParams["figure.figsize"] if figsize is None else figsize
figsize, dpi = get_figure_params(figsize, dpi, ncols)
gs_figsize = (figsize[0] * ncols, figsize[1] * nrows)
ax = []
for i, gs in enumerate(
pl.GridSpec(nrows, ncols, pl.figure(None, gs_figsize, dpi=dpi))
):
if i < len(multikey):
ax.append(
velocity_embedding_stream(
adata,
size=size,
smooth=smooth,
n_neighbors=n_neighbors,
ax=pl.subplot(gs),
color=colors[i] if len(colors) > 1 else color,
layer=layers[i] if len(layers) > 1 else layer,
vkey=vkeys[i] if len(vkeys) > 1 else vkey,
title=title[i] if isinstance(title, (list, tuple)) else title,
X_grid=None if len(vkeys) > 1 else X_grid,
V_grid=None if len(vkeys) > 1 else V_grid,
**scatter_kwargs,
**stream_kwargs,
**kwargs,
)
)
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
else:
ax, show = get_ax(ax, show, figsize, dpi)
for arg in list(kwargs):
if arg in stream_kwargs:
stream_kwargs.update({arg: kwargs[arg]})
else:
scatter_kwargs.update({arg: kwargs[arg]})
stream_kwargs["color"] = stream_kwargs.pop("arrow_color", "k")
ax.streamplot(X_grid[0], X_grid[1], V_grid[0], V_grid[1], **stream_kwargs)
size = 8 * default_size(adata) if size is None else size
ax = scatter(
adata,
layer=layer,
color=color,
size=size,
title=title,
ax=ax,
zorder=0,
**scatter_kwargs,
)
savefig_or_show(dpi=dpi, save=save, show=show)
if show is False:
return ax
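# --- Illustrative usage (editor addition; dataset and obs keys are assumptions) ---
# A typical scVelo workflow before calling this function would be:
#
#     import scvelo as scv
#     adata = scv.datasets.pancreas()
#     scv.pp.filter_and_normalize(adata)
#     scv.pp.moments(adata)
#     scv.tl.velocity(adata)
#     scv.tl.velocity_graph(adata)
#     velocity_embedding_stream(adata, basis="umap", color="clusters", density=2)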
|
|
import datetime
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
import pytz
from django.test import TestCase
from django.utils import timezone
from django.contrib.auth.models import User
from schedule.models import Event, Rule, Calendar, EventRelation
class TestEvent(TestCase):
def setUp(self):
cal = Calendar(name="MyCal")
cal.save()
def __create_event(self, title, start, end, cal):
return Event(**{
'title': title,
'start': start,
'end': end,
'calendar': cal
})
def __create_recurring_event(self, title, start, end, end_recurring, rule, cal):
return Event(**{
'title': title,
'start': start,
'end': end,
'end_recurring_period': end_recurring,
'rule': rule,
'calendar': cal
})
def test_edge_case_events(self):
cal = Calendar(name="MyCal")
cal.save()
data_1 = {
'title': 'Edge case event test one',
'start': datetime.datetime(2013, 1, 5, 8, 0, tzinfo=pytz.utc),
'end': datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
'calendar': cal
}
data_2 = {
'title': 'Edge case event test two',
'start': datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
'end': datetime.datetime(2013, 1, 5, 12, 0, tzinfo=pytz.utc),
'calendar': cal
}
event_one = Event(**data_1)
event_two = Event(**data_2)
event_one.save()
event_two.save()
occurrences_two = event_two.get_occurrences(datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2013, 1, 5, 12, 0, tzinfo=pytz.utc))
self.assertEqual(1, len(occurrences_two))
occurrences_one = event_one.get_occurrences(datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2013, 1, 5, 12, 0, tzinfo=pytz.utc))
self.assertEqual(0, len(occurrences_one))
def test_recurring_event_get_occurrences(self):
cal = Calendar(name="MyCal")
cal.save()
rule = Rule(frequency="WEEKLY")
rule.save()
recurring_event = self.__create_recurring_event(
'Recurrent event test get_occurrence',
datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
rule,
cal,
)
recurring_event.save()
occurrences = recurring_event.get_occurrences(start=datetime.datetime(2008, 1, 12, 0, 0, tzinfo=pytz.utc),
end=datetime.datetime(2008, 1, 20, 0, 0, tzinfo=pytz.utc))
self.assertEqual(["%s to %s" % (o.start, o.end) for o in occurrences],
['2008-01-12 08:00:00+00:00 to 2008-01-12 09:00:00+00:00',
'2008-01-19 08:00:00+00:00 to 2008-01-19 09:00:00+00:00'])
def test_event_get_occurrences_after(self):
cal = Calendar(name="MyCal")
cal.save()
rule = Rule(frequency="WEEKLY")
rule.save()
self.__create_recurring_event(
'Recurrent event test get_occurrence',
datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
rule,
cal,
)
event_one = self.__create_event(
'Edge case event test one',
datetime.datetime(2013, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
cal
)
event_two = self.__create_event(
'Edge case event test two',
datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2013, 1, 5, 12, 0, tzinfo=pytz.utc),
cal
)
event_one.save()
event_two.save()
occurrences_two = event_two.get_occurrences(
datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2013, 1, 5, 12, 0, tzinfo=pytz.utc))
self.assertEqual(1, len(occurrences_two))
occurrences_one = event_one.get_occurrences(
datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2013, 1, 5, 12, 0, tzinfo=pytz.utc))
self.assertEqual(0, len(occurrences_one))
def test_recurring_event_get_occurrences_2(self):
cal = Calendar(name="MyCal")
cal.save()
rule = Rule(frequency="WEEKLY")
rule.save()
recurring_event = self.__create_recurring_event(
'Recurring event test',
datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
rule,
cal
)
recurring_event.save()
occurrences = recurring_event.get_occurrences(
start=datetime.datetime(2008, 1, 12, 0, 0, tzinfo=pytz.utc),
end=datetime.datetime(2008, 1, 20, 0, 0, tzinfo=pytz.utc))
self.assertEqual(["%s to %s" %(o.start, o.end) for o in occurrences],
['2008-01-12 08:00:00+00:00 to 2008-01-12 09:00:00+00:00', '2008-01-19 08:00:00+00:00 to 2008-01-19 09:00:00+00:00'])
def test_recurring_event_get_occurrences_after(self):
cal = Calendar(name="MyCal")
cal.save()
rule = Rule(frequency="WEEKLY")
rule.save()
recurring_event= self.__create_recurring_event(
'Recurrent event test get_occurrence',
datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
rule,
cal,
)
recurring_event.save()
#occurrences = recurring_event.get_occurrences(start=datetime.datetime(2008, 1, 5, tzinfo=pytz.utc),
# end = datetime.datetime(2008, 1, 6, tzinfo=pytz.utc))
#occurrence = occurrences[0]
#occurrence2 = recurring_event.occurrences_after(datetime.datetime(2008, 1, 5, tzinfo=pytz.utc)).next()
#self.assertEqual(occurrence, occurrence2)
def test_recurring_event_with_moved_get_occurrences_after(self):
cal = Calendar(name="MyCal")
cal.save()
rule = Rule(frequency="WEEKLY")
rule.save()
recurring_event= self.__create_recurring_event(
'Recurrent event test get_occurrence',
datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
rule,
cal,
)
recurring_event.save()
occurrence = recurring_event.get_occurrence(datetime.datetime(2008, 1, 12, 8, 0, tzinfo=pytz.utc))
occurrence.move(
datetime.datetime(2008, 1, 15, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 15, 9, 0, tzinfo=pytz.utc))
gen = recurring_event.occurrences_after(
datetime.datetime(2008, 1, 14, 8, 0, tzinfo=pytz.utc))
occurrence2 = next(gen)
# end = datetime.datetime(2008, 1, 6, tzinfo=pytz.utc))
#occurrence = occurrences[0]
#occurrence2 = recurring_event.occurrences_after(datetime.datetime(2008, 1, 5, tzinfo=pytz.utc)).next()
self.assertEqual(occurrence, occurrence2)
def test_recurring_event_get_occurrence(self):
cal = Calendar(name="MyCal")
cal.save()
rule = Rule(frequency="WEEKLY")
rule.save()
event = self.__create_recurring_event(
'Recurrent event test get_occurrence',
datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
rule,
cal,
)
event.save()
occurrence = event.get_occurrence(datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc))
self.assertEqual(occurrence.start, datetime.datetime(2008, 1, 5, 8, tzinfo=pytz.utc))
occurrence.save()
occurrence = event.get_occurrence(datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc))
self.assertTrue(occurrence.pk is not None)
def test_prevent_type_error_when_comparing_naive_and_aware_dates(self):
        # this only tests that no TypeError is raised when comparing naive and aware dates
cal = Calendar(name="MyCal")
cal.save()
rule = Rule(frequency="WEEKLY")
rule.save()
event = self.__create_recurring_event(
'Recurrent event test get_occurrence',
datetime.datetime(2008, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 1, 5, 9, 0, tzinfo=pytz.utc),
datetime.datetime(2008, 5, 5, 0, 0, tzinfo=pytz.utc),
rule,
cal,
)
naive_date = datetime.datetime(2008, 1, 20, 0, 0)
self.assertIsNone(event.get_occurrence(naive_date))
@override_settings(USE_TZ=False)
def test_prevent_type_error_when_comparing_dates_when_tz_off(self):
cal = Calendar(name="MyCal")
cal.save()
rule = Rule(frequency="WEEKLY")
rule.save()
event = self.__create_recurring_event(
'Recurrent event test get_occurrence',
datetime.datetime(2008, 1, 5, 8, 0),
datetime.datetime(2008, 1, 5, 9, 0),
datetime.datetime(2008, 5, 5, 0, 0),
rule,
cal,
)
naive_date = datetime.datetime(2008, 1, 20, 0, 0)
self.assertIsNone(event.get_occurrence(naive_date))
def test_event_get_ocurrence(self):
cal = Calendar(name='MyCal')
cal.save()
start = timezone.now() + datetime.timedelta(days=1)
event = self.__create_event(
'Non recurring event test get_occurrence',
start,
start + datetime.timedelta(hours=1),
cal)
event.save()
occurrence = event.get_occurrence(start)
self.assertEqual(occurrence.start, start)
def test_occurences_after_with_no_params(self):
cal = Calendar(name='MyCal')
cal.save()
start = timezone.now() + datetime.timedelta(days=1)
event = self.__create_event(
'Non recurring event test get_occurrence',
start,
start + datetime.timedelta(hours=1),
cal)
event.save()
occurrences = list(event.occurrences_after())
self.assertEqual(len(occurrences), 1)
self.assertEqual(occurrences[0].start, start)
self.assertEqual(occurrences[0].end, start + datetime.timedelta(hours=1))
    def test_occurrences_with_recurrent_event_end_recurring_period_edge_case(self):
cal = Calendar(name='MyCal')
cal.save()
rule = Rule(frequency="DAILY")
rule.save()
start = timezone.now() + datetime.timedelta(days=1)
event = self.__create_recurring_event(
'Non recurring event test get_occurrence',
start,
start + datetime.timedelta(hours=1),
start + datetime.timedelta(days=10),
rule,
cal)
event.save()
occurrences = list(event.occurrences_after())
self.assertEqual(len(occurrences), 11)
def test_get_for_object(self):
user = User.objects.create_user('john', '[email protected]', 'johnpassword')
event_relations = list(Event.objects.get_for_object(user, 'owner', inherit=False))
self.assertEqual(len(event_relations), 0)
rule = Rule(frequency="DAILY")
rule.save()
cal = Calendar(name='MyCal')
cal.save()
event = self.__create_event(
'event test',
datetime.datetime(2013, 1, 5, 8, 0, tzinfo=pytz.utc),
datetime.datetime(2013, 1, 5, 9, 0, tzinfo=pytz.utc),
cal
)
event.save()
events = list(Event.objects.get_for_object(user, 'owner', inherit=False))
self.assertEqual(len(events), 0)
EventRelation.objects.create_relation(event, user, 'owner')
events = list(Event.objects.get_for_object(user, 'owner', inherit=False))
self.assertEqual(len(events), 1)
self.assertEqual(event, events[0])
def test_get_absolute(self):
cal = Calendar(name='MyCal')
cal.save()
rule = Rule(frequency="DAILY")
rule.save()
start = timezone.now() + datetime.timedelta(days=1)
event = self.__create_recurring_event(
'Non recurring event test get_occurrence',
start,
start + datetime.timedelta(hours=1),
start + datetime.timedelta(days=10),
rule,
cal)
event.save()
url = event.get_absolute_url()
self.assertEqual(reverse('event', kwargs={'event_id': event.id}), url)
def test_(self):
pass
class TestEventRelationManager(TestCase):
def test_get_events_for_object(self):
pass
|
|
"""This component provides HA sensor support for Ring Door Bell/Chimes."""
from __future__ import annotations
from dataclasses import dataclass
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import PERCENTAGE, SIGNAL_STRENGTH_DECIBELS_MILLIWATT
from homeassistant.core import callback
from homeassistant.helpers.icon import icon_for_battery_level
from . import DOMAIN
from .entity import RingEntityMixin
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a sensor for a Ring device."""
devices = hass.data[DOMAIN][config_entry.entry_id]["devices"]
    entities = [
        description.cls(config_entry.entry_id, device, description)
        for device_type in ("chimes", "doorbots", "authorized_doorbots", "stickup_cams")
        for description in SENSOR_TYPES
        if device_type in description.category
        for device in devices[device_type]
        # Skip battery sensors for devices that do not report a battery level;
        # the original check compared device_type, which can never equal "battery".
        if not (description.key == "battery" and device.battery_life is None)
    ]
async_add_entities(entities)
class RingSensor(RingEntityMixin, SensorEntity):
"""A sensor implementation for Ring device."""
entity_description: RingSensorEntityDescription
_attr_should_poll = False # updates are controlled via the hub
def __init__(
self,
config_entry_id,
device,
description: RingSensorEntityDescription,
):
"""Initialize a sensor for Ring device."""
super().__init__(config_entry_id, device)
self.entity_description = description
self._extra = None
self._attr_name = f"{device.name} {description.name}"
self._attr_unique_id = f"{device.id}-{description.key}"
@property
def native_value(self):
"""Return the state of the sensor."""
sensor_type = self.entity_description.key
if sensor_type == "volume":
return self._device.volume
if sensor_type == "battery":
return self._device.battery_life
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if (
self.entity_description.key == "battery"
and self._device.battery_life is not None
):
return icon_for_battery_level(
battery_level=self._device.battery_life, charging=False
)
return self.entity_description.icon
class HealthDataRingSensor(RingSensor):
"""Ring sensor that relies on health data."""
async def async_added_to_hass(self):
"""Register callbacks."""
await super().async_added_to_hass()
await self.ring_objects["health_data"].async_track_device(
self._device, self._health_update_callback
)
async def async_will_remove_from_hass(self):
"""Disconnect callbacks."""
await super().async_will_remove_from_hass()
self.ring_objects["health_data"].async_untrack_device(
self._device, self._health_update_callback
)
@callback
def _health_update_callback(self, _health_data):
"""Call update method."""
self.async_write_ha_state()
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
# These sensors are data hungry and not useful. Disable by default.
return False
@property
def native_value(self):
"""Return the state of the sensor."""
sensor_type = self.entity_description.key
if sensor_type == "wifi_signal_category":
return self._device.wifi_signal_category
if sensor_type == "wifi_signal_strength":
return self._device.wifi_signal_strength
class HistoryRingSensor(RingSensor):
"""Ring sensor that relies on history data."""
_latest_event = None
async def async_added_to_hass(self):
"""Register callbacks."""
await super().async_added_to_hass()
await self.ring_objects["history_data"].async_track_device(
self._device, self._history_update_callback
)
async def async_will_remove_from_hass(self):
"""Disconnect callbacks."""
await super().async_will_remove_from_hass()
self.ring_objects["history_data"].async_untrack_device(
self._device, self._history_update_callback
)
@callback
def _history_update_callback(self, history_data):
"""Call update method."""
if not history_data:
return
kind = self.entity_description.kind
found = None
if kind is None:
found = history_data[0]
else:
for entry in history_data:
if entry["kind"] == kind:
found = entry
break
if not found:
return
self._latest_event = found
self.async_write_ha_state()
@property
def native_value(self):
"""Return the state of the sensor."""
if self._latest_event is None:
return None
return self._latest_event["created_at"]
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = super().extra_state_attributes
if self._latest_event:
attrs["created_at"] = self._latest_event["created_at"]
attrs["answered"] = self._latest_event["answered"]
attrs["recording_status"] = self._latest_event["recording"]["status"]
attrs["category"] = self._latest_event["kind"]
return attrs
@dataclass
class RingRequiredKeysMixin:
"""Mixin for required keys."""
category: list[str]
cls: type[RingSensor]
@dataclass
class RingSensorEntityDescription(SensorEntityDescription, RingRequiredKeysMixin):
"""Describes Ring sensor entity."""
kind: str | None = None
SENSOR_TYPES: tuple[RingSensorEntityDescription, ...] = (
RingSensorEntityDescription(
key="battery",
name="Battery",
category=["doorbots", "authorized_doorbots", "stickup_cams"],
native_unit_of_measurement=PERCENTAGE,
device_class="battery",
cls=RingSensor,
),
RingSensorEntityDescription(
key="last_activity",
name="Last Activity",
category=["doorbots", "authorized_doorbots", "stickup_cams"],
icon="mdi:history",
device_class=SensorDeviceClass.TIMESTAMP,
cls=HistoryRingSensor,
),
RingSensorEntityDescription(
key="last_ding",
name="Last Ding",
category=["doorbots", "authorized_doorbots"],
icon="mdi:history",
kind="ding",
device_class=SensorDeviceClass.TIMESTAMP,
cls=HistoryRingSensor,
),
RingSensorEntityDescription(
key="last_motion",
name="Last Motion",
category=["doorbots", "authorized_doorbots", "stickup_cams"],
icon="mdi:history",
kind="motion",
device_class=SensorDeviceClass.TIMESTAMP,
cls=HistoryRingSensor,
),
RingSensorEntityDescription(
key="volume",
name="Volume",
category=["chimes", "doorbots", "authorized_doorbots", "stickup_cams"],
icon="mdi:bell-ring",
cls=RingSensor,
),
RingSensorEntityDescription(
key="wifi_signal_category",
name="WiFi Signal Category",
category=["chimes", "doorbots", "authorized_doorbots", "stickup_cams"],
icon="mdi:wifi",
cls=HealthDataRingSensor,
),
RingSensorEntityDescription(
key="wifi_signal_strength",
name="WiFi Signal Strength",
category=["chimes", "doorbots", "authorized_doorbots", "stickup_cams"],
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
icon="mdi:wifi",
device_class="signal_strength",
cls=HealthDataRingSensor,
),
)
|
|
from openerp import models
from openerp.tools import mute_logger
from openerp.tests import common
from openerp.exceptions import AccessError
class TestAPI(common.TransactionCase):
""" test the new API of the ORM """
def assertIsRecordset(self, value, model):
self.assertIsInstance(value, models.BaseModel)
self.assertEqual(value._name, model)
def assertIsRecord(self, value, model):
self.assertIsRecordset(value, model)
self.assertTrue(len(value) <= 1)
def assertIsNull(self, value, model):
self.assertIsRecordset(value, model)
self.assertFalse(value)
@mute_logger('openerp.models')
def test_00_query(self):
""" Build a recordset, and check its contents. """
domain = [('name', 'ilike', 'j')]
ids = self.registry('res.partner').search(self.cr, self.uid, domain)
partners = self.env['res.partner'].search(domain)
# partners is a collection of browse records corresponding to ids
self.assertTrue(ids)
self.assertTrue(partners)
# partners and its contents are instance of the model
self.assertIsRecordset(partners, 'res.partner')
for p in partners:
self.assertIsRecord(p, 'res.partner')
self.assertEqual([p.id for p in partners], ids)
self.assertEqual(self.env['res.partner'].browse(ids), partners)
@mute_logger('openerp.models')
def test_01_query_offset(self):
""" Build a recordset with offset, and check equivalence. """
partners1 = self.env['res.partner'].search([], offset=10)
partners2 = self.env['res.partner'].search([])[10:]
self.assertIsRecordset(partners1, 'res.partner')
self.assertIsRecordset(partners2, 'res.partner')
self.assertEqual(list(partners1), list(partners2))
@mute_logger('openerp.models')
def test_02_query_limit(self):
""" Build a recordset with offset, and check equivalence. """
partners1 = self.env['res.partner'].search([], limit=10)
partners2 = self.env['res.partner'].search([])[:10]
self.assertIsRecordset(partners1, 'res.partner')
self.assertIsRecordset(partners2, 'res.partner')
self.assertEqual(list(partners1), list(partners2))
@mute_logger('openerp.models')
def test_03_query_offset_limit(self):
""" Build a recordset with offset and limit, and check equivalence. """
partners1 = self.env['res.partner'].search([], offset=3, limit=7)
partners2 = self.env['res.partner'].search([])[3:10]
self.assertIsRecordset(partners1, 'res.partner')
self.assertIsRecordset(partners2, 'res.partner')
self.assertEqual(list(partners1), list(partners2))
@mute_logger('openerp.models')
def test_04_query_count(self):
""" Test the search method with count=True. """
count1 = self.registry('res.partner').search(self.cr, self.uid, [], count=True)
count2 = self.env['res.partner'].search([], count=True)
self.assertIsInstance(count1, (int, long))
self.assertIsInstance(count2, (int, long))
self.assertEqual(count1, count2)
@mute_logger('openerp.models')
def test_05_immutable(self):
""" Check that a recordset remains the same, even after updates. """
domain = [('name', 'ilike', 'j')]
partners = self.env['res.partner'].search(domain)
self.assertTrue(partners)
ids = map(int, partners)
# modify those partners, and check that partners has not changed
self.registry('res.partner').write(self.cr, self.uid, ids, {'active': False})
self.assertEqual(ids, map(int, partners))
# redo the search, and check that the result is now empty
partners2 = self.env['res.partner'].search(domain)
self.assertFalse(partners2)
@mute_logger('openerp.models')
def test_06_fields(self):
""" Check that relation fields return records, recordsets or nulls. """
user = self.registry('res.users').browse(self.cr, self.uid, self.uid)
self.assertIsRecord(user, 'res.users')
self.assertIsRecord(user.partner_id, 'res.partner')
self.assertIsRecordset(user.groups_id, 'res.groups')
partners = self.env['res.partner'].search([])
for name, field in partners._fields.iteritems():
if field.type == 'many2one':
for p in partners:
self.assertIsRecord(p[name], field.comodel_name)
elif field.type == 'reference':
for p in partners:
if p[name]:
self.assertIsRecord(p[name], field.comodel_name)
elif field.type in ('one2many', 'many2many'):
for p in partners:
self.assertIsRecordset(p[name], field.comodel_name)
@mute_logger('openerp.models')
def test_07_null(self):
""" Check behavior of null instances. """
# select a partner without a parent
partner = self.env['res.partner'].search([('parent_id', '=', False)])[0]
# check partner and related null instances
self.assertTrue(partner)
self.assertIsRecord(partner, 'res.partner')
self.assertFalse(partner.parent_id)
self.assertIsNull(partner.parent_id, 'res.partner')
self.assertIs(partner.parent_id.id, False)
self.assertFalse(partner.parent_id.user_id)
self.assertIsNull(partner.parent_id.user_id, 'res.users')
self.assertIs(partner.parent_id.user_id.name, False)
self.assertFalse(partner.parent_id.user_id.groups_id)
self.assertIsRecordset(partner.parent_id.user_id.groups_id, 'res.groups')
@mute_logger('openerp.models')
def test_10_old_old(self):
""" Call old-style methods in the old-fashioned way. """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
ids = map(int, partners)
# call method name_get on partners' model, and check its effect
res = partners._model.name_get(self.cr, self.uid, ids)
self.assertEqual(len(res), len(ids))
self.assertEqual(set(val[0] for val in res), set(ids))
@mute_logger('openerp.models')
def test_20_old_new(self):
""" Call old-style methods in the new API style. """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
# call method name_get on partners itself, and check its effect
res = partners.name_get()
self.assertEqual(len(res), len(partners))
self.assertEqual(set(val[0] for val in res), set(map(int, partners)))
@mute_logger('openerp.models')
def test_25_old_new(self):
""" Call old-style methods on records (new API style). """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
# call method name_get on partner records, and check its effect
for p in partners:
res = p.name_get()
self.assertTrue(isinstance(res, list) and len(res) == 1)
self.assertTrue(isinstance(res[0], tuple) and len(res[0]) == 2)
self.assertEqual(res[0][0], p.id)
@mute_logger('openerp.models')
def test_30_new_old(self):
""" Call new-style methods in the old-fashioned way. """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
ids = map(int, partners)
# call method write on partners' model, and check its effect
partners._model.write(self.cr, self.uid, ids, {'active': False})
for p in partners:
self.assertFalse(p.active)
@mute_logger('openerp.models')
def test_40_new_new(self):
""" Call new-style methods in the new API style. """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
# call method write on partners itself, and check its effect
partners.write({'active': False})
for p in partners:
self.assertFalse(p.active)
@mute_logger('openerp.models')
def test_45_new_new(self):
""" Call new-style methods on records (new API style). """
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertTrue(partners)
# call method write on partner records, and check its effects
for p in partners:
p.write({'active': False})
for p in partners:
self.assertFalse(p.active)
@mute_logger('openerp.models')
@mute_logger('openerp.addons.base.ir.ir_model')
def test_50_environment(self):
""" Test environment on records. """
# partners and reachable records are attached to self.env
partners = self.env['res.partner'].search([('name', 'ilike', 'j')])
self.assertEqual(partners.env, self.env)
for x in (partners, partners[0], partners[0].company_id):
self.assertEqual(x.env, self.env)
for p in partners:
self.assertEqual(p.env, self.env)
# check that the current user can read and modify company data
partners[0].company_id.name
partners[0].company_id.write({'name': 'Fools'})
# create an environment with the demo user
demo = self.env['res.users'].search([('login', '=', 'demo')])[0]
demo_env = self.env(user=demo)
self.assertNotEqual(demo_env, self.env)
# partners and related records are still attached to self.env
self.assertEqual(partners.env, self.env)
for x in (partners, partners[0], partners[0].company_id):
self.assertEqual(x.env, self.env)
for p in partners:
self.assertEqual(p.env, self.env)
# create record instances attached to demo_env
demo_partners = partners.sudo(demo)
self.assertEqual(demo_partners.env, demo_env)
for x in (demo_partners, demo_partners[0], demo_partners[0].company_id):
self.assertEqual(x.env, demo_env)
for p in demo_partners:
self.assertEqual(p.env, demo_env)
# demo user can read but not modify company data
demo_partners[0].company_id.name
with self.assertRaises(AccessError):
demo_partners[0].company_id.write({'name': 'Pricks'})
# remove demo user from all groups
demo.write({'groups_id': [(5,)]})
# demo user can no longer access partner data
with self.assertRaises(AccessError):
demo_partners[0].company_id.name
@mute_logger('openerp.models')
def test_55_draft(self):
""" Test draft mode nesting. """
env = self.env
self.assertFalse(env.in_draft)
with env.do_in_draft():
self.assertTrue(env.in_draft)
with env.do_in_draft():
self.assertTrue(env.in_draft)
with env.do_in_draft():
self.assertTrue(env.in_draft)
self.assertTrue(env.in_draft)
self.assertTrue(env.in_draft)
self.assertFalse(env.in_draft)
@mute_logger('openerp.models')
def test_60_cache(self):
""" Check the record cache behavior """
Partners = self.env['res.partner']
pids = []
data = {
'partner One': ['Partner One - One', 'Partner One - Two'],
'Partner Two': ['Partner Two - One'],
'Partner Three': ['Partner Three - One'],
}
for p in data:
pids.append(Partners.create({
'name': p,
'child_ids': [(0, 0, {'name': c}) for c in data[p]],
}).id)
partners = Partners.search([('id', 'in', pids)])
partner1, partner2 = partners[0], partners[1]
children1, children2 = partner1.child_ids, partner2.child_ids
self.assertTrue(children1)
self.assertTrue(children2)
# take a child contact
child = children1[0]
self.assertEqual(child.parent_id, partner1)
self.assertIn(child, partner1.child_ids)
self.assertNotIn(child, partner2.child_ids)
# fetch data in the cache
for p in partners:
p.name, p.company_id.name, p.user_id.name, p.contact_address
self.env.check_cache()
# change its parent
child.write({'parent_id': partner2.id})
self.env.check_cache()
# check recordsets
self.assertEqual(child.parent_id, partner2)
self.assertNotIn(child, partner1.child_ids)
self.assertIn(child, partner2.child_ids)
self.assertEqual(set(partner1.child_ids + child), set(children1))
self.assertEqual(set(partner2.child_ids), set(children2 + child))
self.env.check_cache()
# delete it
child.unlink()
self.env.check_cache()
# check recordsets
self.assertEqual(set(partner1.child_ids), set(children1) - set([child]))
self.assertEqual(set(partner2.child_ids), set(children2))
self.env.check_cache()
@mute_logger('openerp.models')
def test_60_cache_prefetching(self):
""" Check the record cache prefetching """
self.env.invalidate_all()
# all the records of an instance already have an entry in cache
partners = self.env['res.partner'].search([])
partner_ids = self.env.prefetch['res.partner']
self.assertEqual(set(partners.ids), set(partner_ids))
# countries have not been fetched yet; their cache must be empty
countries = self.env['res.country'].browse()
self.assertFalse(self.env.prefetch['res.country'])
# reading ONE partner should fetch them ALL
countries |= partners[0].country_id
country_cache = self.env.cache[partners._fields['country_id']]
self.assertLessEqual(set(partners._ids), set(country_cache))
# read all partners, and check that the cache already contained them
country_ids = list(self.env.prefetch['res.country'])
for p in partners:
countries |= p.country_id
self.assertLessEqual(set(countries.ids), set(country_ids))
@mute_logger('openerp.models')
def test_70_one(self):
""" Check method one(). """
# check with many records
ps = self.env['res.partner'].search([('name', 'ilike', 'a')])
self.assertTrue(len(ps) > 1)
with self.assertRaises(ValueError):
ps.ensure_one()
p1 = ps[0]
self.assertEqual(len(p1), 1)
self.assertEqual(p1.ensure_one(), p1)
p0 = self.env['res.partner'].browse()
self.assertEqual(len(p0), 0)
with self.assertRaises(ValueError):
p0.ensure_one()
@mute_logger('openerp.models')
def test_80_contains(self):
""" Test membership on recordset. """
p1 = self.env['res.partner'].search([('name', 'ilike', 'a')], limit=1).ensure_one()
ps = self.env['res.partner'].search([('name', 'ilike', 'a')])
self.assertTrue(p1 in ps)
@mute_logger('openerp.models')
def test_80_set_operations(self):
""" Check set operations on recordsets. """
pa = self.env['res.partner'].search([('name', 'ilike', 'a')])
pb = self.env['res.partner'].search([('name', 'ilike', 'b')])
self.assertTrue(pa)
self.assertTrue(pb)
self.assertTrue(set(pa) & set(pb))
concat = pa + pb
self.assertEqual(list(concat), list(pa) + list(pb))
self.assertEqual(len(concat), len(pa) + len(pb))
difference = pa - pb
self.assertEqual(len(difference), len(set(difference)))
self.assertEqual(set(difference), set(pa) - set(pb))
self.assertLessEqual(difference, pa)
intersection = pa & pb
self.assertEqual(len(intersection), len(set(intersection)))
self.assertEqual(set(intersection), set(pa) & set(pb))
self.assertLessEqual(intersection, pa)
self.assertLessEqual(intersection, pb)
union = pa | pb
self.assertEqual(len(union), len(set(union)))
self.assertEqual(set(union), set(pa) | set(pb))
self.assertGreaterEqual(union, pa)
self.assertGreaterEqual(union, pb)
# one cannot mix different models with set operations
ps = pa
ms = self.env['ir.ui.menu'].search([])
self.assertNotEqual(ps._name, ms._name)
self.assertNotEqual(ps, ms)
with self.assertRaises(TypeError):
res = ps + ms
with self.assertRaises(TypeError):
res = ps - ms
with self.assertRaises(TypeError):
res = ps & ms
with self.assertRaises(TypeError):
res = ps | ms
with self.assertRaises(TypeError):
res = ps < ms
with self.assertRaises(TypeError):
res = ps <= ms
with self.assertRaises(TypeError):
res = ps > ms
with self.assertRaises(TypeError):
res = ps >= ms
@mute_logger('openerp.models')
def test_80_filter(self):
""" Check filter on recordsets. """
ps = self.env['res.partner'].search([])
customers = ps.browse([p.id for p in ps if p.customer])
# filter on a single field
self.assertEqual(ps.filtered(lambda p: p.customer), customers)
self.assertEqual(ps.filtered('customer'), customers)
# filter on a sequence of fields
self.assertEqual(
ps.filtered(lambda p: p.parent_id.customer),
ps.filtered('parent_id.customer')
)
@mute_logger('openerp.models')
def test_80_map(self):
""" Check map on recordsets. """
ps = self.env['res.partner'].search([])
parents = ps.browse()
for p in ps: parents |= p.parent_id
# map a single field
self.assertEqual(ps.mapped(lambda p: p.parent_id), parents)
self.assertEqual(ps.mapped('parent_id'), parents)
# map a sequence of fields
self.assertEqual(
ps.mapped(lambda p: p.parent_id.name),
[p.parent_id.name for p in ps]
)
self.assertEqual(
ps.mapped('parent_id.name'),
[p.name for p in parents]
)
|
|
# File to hold auxiliary functions for performing logistic regression with the Titanic data set.
import numpy as np
import pandas as pd
def cat2indicator(df, columns):
"""
Convert columns of categorical variables to multiple columns of
indicator variables.
"""
# Create new dataframe to hold data
df2 = df.copy()
# Process each column included in columns argument
for column in columns:
# Make new column from each unique value in the categorical column
for value in df2[column].unique():
colname = column+'_'+str(value)
newcol = np.zeros(len(df2))
newcol[np.array(df[column]==value)] = 1
df2[colname] = newcol
# Drop original column of categorical variables
df2.drop(column,axis=1,inplace=True)
# Return dataframe to calling program
return df2
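# --- Illustrative usage sketch (not part of the original module) ---
# Shows how cat2indicator() expands a categorical column into 0/1 indicator
# columns. The 'Embarked' values below are hypothetical sample rows, not data
# read from the Titanic files.
def _example_cat2indicator():
    demo = pd.DataFrame({'Embarked': ['S', 'C', 'S', 'Q'],
                         'Fare': [7.25, 71.28, 8.05, 8.46]})
    expanded = cat2indicator(demo, ['Embarked'])
    # 'Embarked' is dropped and replaced by Embarked_S, Embarked_C and
    # Embarked_Q, each 1 where the row matched that category and 0 elsewhere.
    return expanded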
def nametitles(df):
"""
Get title information from the name column in the dataframe.
Add indicator column for each title.
"""
# Create new dataframe to hold data
df2 = df.copy()
# Get list of titles from the name list
titles = []
for name in df2.Name:
thistitle = name.split('.')[0].split(' ')[-1]
titles.append(thistitle)
# Add this column to the dataframe
df2['Title'] = titles
## For each title, add an indicator variable marking it as a title
#for title in titles:
# barray = []
# for name in df2.Name:
# if title+'.' in name: barray.append(True)
# else: barray.append(False)
# newcol = 'Title_'+title
# df2[newcol] = barray
#
## Drop name list from the dataframe
#df2.drop('name',axis=1,inplace=True)
# Return dataframe to calling program
return df2
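# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates the title extraction done by nametitles(): everything before the
# first '.' is split on spaces and the last token is kept, so
# "Braund, Mr. Owen Harris" yields "Mr". The names below are sample rows.
def _example_nametitles():
    demo = pd.DataFrame({'Name': ['Braund, Mr. Owen Harris',
                                  'Heikkinen, Miss. Laina']})
    with_titles = nametitles(demo)
    # with_titles['Title'] now holds ['Mr', 'Miss']
    return with_titles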
def add_interactions(df, X, variables):
"""
    df - dataframe holding the original categorical variables
    X - dataframe of regression (dummy) columns to which the interaction
        terms are added
    variables - list of two variable names from which to create interaction
        terms (see the usage sketch after this function)
"""
# Get dummy variables for each category
#vardict = {}
#for var in variables:
# # Get dummy variables for this category
# vardict[var] = pd.get_dummies(df[var])
# Get dummy variables for each category
vardict = {}
for var in variables:
thesecols = []
for col in X:
if (var==col[:len(var)]) & ('*' not in col):
thesecols.append( col )
vardict[var] = thesecols
    # Add interactions between the dummy columns of the two variables.
    # Index vardict by the variable names instead of relying on dict ordering
    # (dict.values() is unordered in older Pythons and not subscriptable in
    # Python 3).
    if len(variables) == 2:
        for value1 in vardict[variables[0]]:
            for value2 in vardict[variables[1]]:
                newname = value1 + '_*_' + value2
                X[newname] = X[value1] * X[value2]
# # Calculate ineraction terms between all items in the dictionary
# if len(variables)==2:
# for column1 in vardict[variables[0]].columns:
# for column2 in vardict[variables[1]].columns:
# newname = str(column1)+'_*_'+str(column2)
# X[newname] = vardict[variables[0]][column1]*vardict[variables[1]][column2]
# colname = str(vardict[variables[0]].columns[0]) + '_*_' + str(vardict[variables[1]].columns[0])
#
# if len(variables)==3:
# for column1 in vardict[variables[0]].columns:
# for column2 in vardict[variables[1]].columns:
# for column3 in vardict[variables[2]].columns:
# newname = str(column1)+'_*_'+str(column2)+'_*_'+str(column3)
# X[newname] = vardict[variables[0]][column1]*vardict[variables[1]][column2]*vardict[variables[2]][column2]
# colname = str(vardict[variables[0]].columns[0]) + '_*_' + str(vardict[variables[1]].columns[0]) + '_*_' + str(vardict[variables[2]].columns[0])
#
#
# # Drop one column from those above
#X.drop(colname,axis=1,inplace=True)
# Return dataframe to calling program
return X
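# --- Illustrative usage sketch (not part of the original module) ---
# Shows how add_interactions() pairs dummy columns by name prefix. The column
# names below (Pclass_1, Pclass_2, Sex_male) are hypothetical; in practice they
# come from add_dummy() further down in this module.
def _example_add_interactions():
    df = pd.DataFrame({'Pclass': [1, 2], 'Sex': ['male', 'female']})
    X = pd.DataFrame({'Pclass_1': [1.0, 0.0],
                      'Pclass_2': [0.0, 1.0],
                      'Sex_male': [1.0, 0.0]})
    X = add_interactions(df, X, ['Pclass', 'Sex'])
    # X now also contains Pclass_1_*_Sex_male and Pclass_2_*_Sex_male, the
    # element-wise products of the matching dummy columns.
    return X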
def add_dummy(df, categories, label, drop=False):#, interaction=None):
"""
df - dataframe in which to place new dummy variables
categories - categorical variable from which make dummy variables
label - string of how to label each dummy column.
drop - Boolean indicating whether to drop a column of dummies
"""
# Get dataframe of dummy variables from categories
dum = pd.get_dummies(categories)
# Set index to match that of new dataframe
dum = dum.set_index(df.index)
# Label columns of dummy variables
dum.columns = [label+'_'+str(val) for val in dum.columns]
# Drop one column of dummy variable so that no column is
# a linear combination of another. Do this when using
# a constant in the linear model.
    if drop:
dum.drop(dum.columns[0],axis=1,inplace=True)
# Join new dummy dataframe to the dataframe of variables
# for the regression
df = df.join(dum)
# Return new updated dataframe to the calling program
return df
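# --- Illustrative usage sketch (not part of the original module) ---
# Shows add_dummy() turning a categorical Series into labelled dummy columns
# and dropping the first level so the dummies are not collinear with the
# constant term. The 'Sex' values are hypothetical sample data.
def _example_add_dummy():
    df = pd.DataFrame(index=[0, 1, 2])
    sex = pd.Series(['male', 'female', 'male'])
    df = add_dummy(df, sex, 'Sex', drop=True)
    # pd.get_dummies() yields Sex_female and Sex_male; drop=True removes the
    # first column, leaving only Sex_male.
    return df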
def make_matrix(df,matchcols=None):
"""
Function to construct the matrix for the linear regression.
matchcols - column names to include in the new matrix, regardless
of whether the new matrix may be singular.
"""
# Fill embarcation location nan with a string
df.Embarked = df.Embarked.fillna('nan')
# Create name category from titles in the name column
df = nametitles(df)
# Define new dataframe to hold the matrix
X = pd.DataFrame(index=df.index)
# Add categorical variables to the matrix
X = add_dummy(X, df.Embarked, 'Embarked', drop=True)
X = add_dummy(X, df.Sex, 'Sex', drop=True)
X = add_dummy(X, df.Pclass, 'Pclass', drop=True)
X = add_dummy(X, df.Title, 'Title', drop=True)
goodtitles = ['Master']
#goodtitles = ['Mr','Mrs','Miss','Master']#,'Rev', 'Dr']#,
#'Jonkheer', 'Countess','Lady','Major','Capt','Sir']
for column in X.columns:
if column[:5]=='Title':
if column[6:] not in goodtitles:
X.drop(column,axis=1,inplace=True)
# Make categorical variables from Fare and Age
#df['Fare'][df.Fare.isnull()] = df.Fare.notnull().mean()
#df.Fare = np.ceil( df.Fare / 10. ) * 10.
#df.Fare[df.Fare>50] = 50.
#X = add_dummy(X, df.Fare, 'Fare', drop=True)
#df['Age'][df.Age.isnull()] = df.Age.notnull().mean()
#df.Age = np.ceil( df.Age / 10. ) * 10.
#df.Age[df.Age>60] = 60.
#X = add_dummy(X, df.Age, 'Age', drop=True)
# Add continuous variables to the dataframe
#X['Fare'] = (df.Fare - df.Fare.mean()) / df.Fare.std()
#X['Fare'][X.Fare.isnull()] = X.Fare.notnull().mean()
#X['Fare2'] = X.Fare**2
#X['Age'] = (df.Age - df.Age.mean()) / df.Age.std()
#X['Age'][X.Age.isnull()] = X.Age.notnull().mean()
#X['Age2'] = X.Age**2
# Add interaction terms
X = add_interactions(df, X, ['Pclass','Sex'])
X = add_interactions(df, X, ['Sex','Embarked'])
#X = add_interactions(df, X, ['Embarked','Pclass'] )
#X = add_interactions(df, X, ['Age','Sex'])
#X = add_interactions(df, X, ['Age','Pclass'])
# Remove any columns that are a single constant or all zeros
if matchcols is None:
for col in X.columns:
if (np.std(X[col])==0):
X.drop(col,axis=1,inplace=True)
else:
# Remove columns not in matchcols
for col in X.columns:
if col not in matchcols:
X.drop(col,axis=1,inplace=True)
# Add matchcols not in columns
for col in matchcols:
if col not in X.columns:
X[col] = np.zeros(len(X))
# Order columns to match that of the input columns
        X = X.reindex(columns=matchcols)
# Add column of ones as a constant
X.insert(0,'const',np.ones(len(X))) #X = sm.add_constant(X,prepend=True)
# Return dataframe for regression to calling program
return X
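# --- Illustrative usage sketch (not part of the original module) ---
# Shows the intended matchcols contract: build the design matrix from the
# training frame first, then pass its columns (minus the constant, which
# make_matrix adds itself) when building the test matrix, so both matrices
# share the same columns in the same order. 'train_df' and 'test_df' are
# hypothetical DataFrames with the usual Titanic columns (Name, Sex, Pclass,
# Embarked, ...).
def _example_make_matrix(train_df, test_df):
    X_train = make_matrix(train_df)
    X_test = make_matrix(test_df, matchcols=X_train.columns.drop('const'))
    return X_train, X_test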
def score(obs, prediction):
"""
Calculate score of the prediction.
"""
return float(sum(obs==prediction)) / len(obs)
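# --- Illustrative usage sketch (not part of the original module) ---
# score() is plain classification accuracy: the fraction of positions where the
# observed and predicted labels agree. The arrays below are made-up values.
def _example_score():
    obs = np.array([1, 0, 1, 1])
    prediction = np.array([1, 0, 0, 1])
    return score(obs, prediction)  # 3 of 4 labels match -> 0.75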
|
|
import unittest
from io import StringIO
from python_digest import *
from python_digest.http import *
from python_digest.utils import *
class HttpTests(unittest.TestCase):
def test_parse_quoted_string(self):
test_cases = [
('""', ''), # OK
('"hello"', 'hello'), # OK
('', False), # no quotes
('"', False), # no end-quote
('a"', False), # no start-quote
('"a', False), # no end-quote
('a', False), # no quotes
('"\\""', '"'), # escaping quote
('"\\\\"', '\\'), # escaping backslash
('"hello\\"', False) # no end-quote
]
for test_case in test_cases:
self.assertEqual(test_case[1], parse_quoted_string(test_case[0]))
def test_parse_token(self):
legal_tokens = [
"hello_world!",
"hmm.",
"123-47"]
illegal_tokens = [
"tabit\t",
"a/b",
"what's up, doc?"]
for token in legal_tokens:
self.assertEqual(token, parse_token(token))
for token in illegal_tokens:
self.assertFalse(parse_token(token))
class PythonDigestTests(unittest.TestCase):
def test_validate_uri(self):
self.assertTrue(validate_uri('http://server:port/some/path', '/some/path'))
self.assertTrue(validate_uri('/some/path', '/some/path'))
self.assertTrue(validate_uri('http://server:port/some/path?q=v&x=y', '/some/path'))
self.assertTrue(validate_uri('http://server:port/spacey%20path', '/spacey path'))
self.assertTrue(validate_uri('http://server:port/%7euser/', '/~user/'))
self.assertTrue(validate_uri('http://server:port/%7Euser/', '/~user/'))
self.assertFalse(validate_uri('http://server:port/some/other/path', '/some/path'))
self.assertFalse(validate_uri('/some/other/path', '/some/path'))
self.assertFalse(validate_uri('http://server:port/some/other/path?q=v&x=y',
'/some/path'))
def test_nonce_functions(self):
timestamp = 12345.01
nonce = calculate_nonce(timestamp, 'secret')
self.assertTrue(validate_nonce(nonce, 'secret'))
self.assertFalse(validate_nonce(nonce, 'other secret'))
self.assertFalse(validate_nonce(nonce[:-1], 'secret'))
self.assertEqual(timestamp, get_nonce_timestamp(nonce))
def test_parse_digest_challenge(self):
challenge_header = 'Digest nonce="1263312775.17:7a4267d73fb67fe9da897bb5445153ae", ' \
'realm="API", algorithm="MD5", opaque="38A924C1874E52F9A379BCA9F64D04F6", ' \
'qop="auth", stale="false"'
self.assertTrue(is_digest_challenge(challenge_header))
self.assertFalse(is_digest_challenge('Basic realm="API"'))
digest_challenge = parse_digest_challenge(challenge_header)
self.assertEqual('1263312775.17:7a4267d73fb67fe9da897bb5445153ae',
digest_challenge.nonce)
self.assertEqual('API', digest_challenge.realm)
self.assertEqual('MD5', digest_challenge.algorithm)
self.assertEqual('38A924C1874E52F9A379BCA9F64D04F6', digest_challenge.opaque)
self.assertEqual('auth', digest_challenge.qop)
self.assertEqual(False, digest_challenge.stale)
def test_build_digest_challenge(self):
timestamp = 12345.01
challenge = build_digest_challenge(timestamp, 'secret', 'myrealm', 'myopaque', False)
self.assertEqual('digest ', challenge[0:7].lower())
challenge_parts = parse_parts(challenge[7:])
self.assertEqual(challenge_parts['realm'], 'myrealm')
self.assertEqual(challenge_parts['opaque'], 'myopaque')
self.assertEqual(challenge_parts['qop'], 'auth')
if 'algorithm' in challenge_parts:
self.assertEqual(challenge_parts['algorithm'], 'MD5')
if 'stale' in challenge_parts:
self.assertEqual(challenge_parts['stale'].lower(), 'false')
self.assertTrue(validate_nonce(challenge_parts['nonce'], 'secret'))
self.assertEqual(12345.01, get_nonce_timestamp(challenge_parts['nonce']))
def test_build_authorization_request(self):
# One calling pattern
request_header = build_authorization_request(
username='erik', realm='API', method='GET',
uri='/api/accounts/account/erik/',
nonce='1263251163.72:0D93:6c012a9bc11e535ff2cddb54663e44bc',
opaque='D80E5E5109EB9918993B5F886D14D2E5', nonce_count=3,
password='test', client_nonce='c316b5722463aee9')
self.assertTrue(is_digest_credential(request_header))
digest_response = parse_digest_credentials(request_header)
self.assertEqual(digest_response.username, 'erik')
self.assertEqual(digest_response.qop, 'auth')
self.assertEqual(digest_response.algorithm, 'MD5')
self.assertEqual(digest_response.uri, '/api/accounts/account/erik/')
self.assertEqual(digest_response.nonce,
'1263251163.72:0D93:6c012a9bc11e535ff2cddb54663e44bc')
self.assertEqual(digest_response.opaque, 'D80E5E5109EB9918993B5F886D14D2E5')
self.assertEqual(digest_response.realm, 'API')
self.assertEqual(digest_response.response, 'a8f5c1289e081a7a0f5faa91d24f3b46')
self.assertEqual(digest_response.nc, 3)
self.assertEqual(digest_response.cnonce, 'c316b5722463aee9')
# Second calling pattern
challenge_header = \
'Digest nonce="1263251163.72:0D93:6c012a9bc11e535ff2cddb54663e44bc", ' \
'realm="API", algorithm="MD5", opaque="D80E5E5109EB9918993B5F886D14D2E5", ' \
'qop="auth", stale="false"'
digest_challenge = parse_digest_challenge(challenge_header)
request_header = build_authorization_request(username='erik', method='GET',
uri='/api/accounts/account/erik/',
nonce_count=3, password='test',
digest_challenge=digest_challenge)
self.assertTrue(is_digest_credential(request_header))
digest_response = parse_digest_credentials(request_header)
self.assertEqual(digest_response.nonce,
'1263251163.72:0D93:6c012a9bc11e535ff2cddb54663e44bc')
self.assertEqual(digest_response.realm, 'API')
self.assertEqual(digest_response.opaque, 'D80E5E5109EB9918993B5F886D14D2E5')
# Third calling pattern
challenge_header = \
'Digest nonce="1263251163.72:0D93:6c012a9bc11e535ff2cddb54663e44bc", ' \
'realm="API", algorithm="MD5", opaque="D80E5E5109EB9918993B5F886D14D2E5", ' \
'qop="auth", stale="false"'
request_header = build_authorization_request(username='erik', method='GET',
uri='/api/accounts/account/erik/',
nonce_count=3, password='test',
digest_challenge=challenge_header)
digest_response = parse_digest_credentials(request_header)
self.assertEqual(digest_response.nonce,
'1263251163.72:0D93:6c012a9bc11e535ff2cddb54663e44bc')
def test_unicode_credentials(self):
username = "mickey\xe8\xe9"
challenge_header = \
'Digest nonce="1263251163.72:0D93:6c012a9bc11e535ff2cddb54663e44bc", ' \
'realm="API", algorithm="MD5", opaque="D80E5E5109EB9918993B5F886D14D2E5", ' \
'qop="auth", stale="false"'
request_header = build_authorization_request(
username=username, method='GET', uri='/api/accounts/account/erik/',
nonce_count=3,password=username, digest_challenge=challenge_header)
digest_response = parse_digest_credentials(request_header)
self.assertEqual(digest_response.username, 'mickey\xc3\xa8\xc3\xa9')
kd = calculate_request_digest(
'GET', calculate_partial_digest(username, 'API', username),
digest_response)
self.assertEquals(digest_response.response, kd)
def test_calculate_request_digest(self):
# one calling pattern
header = \
'Digest username="erik", realm="API", ' \
'nonce="1263251163.72:0D93:6c012a9bc11e535ff2cddb54663e44bc", ' \
'uri="/api/accounts/account/erik/", algorithm=MD5, ' \
'response="a8f5c1289e081a7a0f5faa91d24f3b46", ' \
'opaque="D80E5E5109EB9918993B5F886D14D2E5", qop=auth, nc=00000003, ' \
'cnonce="c316b5722463aee9"'
digest_response = parse_digest_credentials(header)
kd = calculate_request_digest('GET', calculate_partial_digest('erik', 'API', 'test'),
digest_response)
self.assertEqual(kd, 'a8f5c1289e081a7a0f5faa91d24f3b46')
# other calling pattern
kd = calculate_request_digest(
'GET', calculate_partial_digest('erik', 'API', 'test'),
nonce='1263251163.72:0D93:6c012a9bc11e535ff2cddb54663e44bc',
uri='/api/accounts/account/erik/',
nonce_count=3, client_nonce='c316b5722463aee9')
self.assertEqual(kd, 'a8f5c1289e081a7a0f5faa91d24f3b46')
def test_calculate_partial_digest(self):
self.assertEqual('ecfc9eadfaecf48a1edcf894992350dd',
calculate_partial_digest('erik', 'API', 'test'))
def test_parse_digest_response(self):
digest_response_string = \
'username="erik", realm="API", ' \
'nonce="the_nonce", uri="/the/uri", ' \
'response="18824d23aa8649c6231978d3e8532528", ' \
'opaque="the_opaque", ' \
'qop=auth, nc=0000000a, cnonce="the_cnonce"'
digest_response = parse_digest_response(digest_response_string)
self.assertEqual('erik', digest_response.username)
self.assertEqual('API', digest_response.realm)
self.assertEqual('the_nonce', digest_response.nonce)
self.assertEqual('/the/uri', digest_response.uri)
self.assertEqual('18824d23aa8649c6231978d3e8532528', digest_response.response)
self.assertEqual('the_opaque', digest_response.opaque)
self.assertEqual('auth', digest_response.qop)
self.assertEqual(10, digest_response.nc)
self.assertEqual('the_cnonce', digest_response.cnonce)
self.assertEqual('MD5', digest_response.algorithm)
# missing username
invalid_digest_response_string = \
'realm="API", ' \
'nonce="the_nonce", uri="/the/uri", ' \
'response="18824d23aa8649c6231978d3e8532528", ' \
'opaque="the_opaque", ' \
'qop=auth, nc=0000000a, cnonce="the_cnonce"'
self.assertEqual(None, parse_digest_response(invalid_digest_response_string))
# invalid nc
invalid_digest_response_string = \
'username="erik", realm="API", ' \
'nonce="the_nonce", uri="/the/uri", ' \
'response="18824d23aa8649c6231978d3e8532528", ' \
'opaque="the_opaque", ' \
'qop=auth, nc=0000000X, cnonce="the_cnonce"'
self.assertEqual(None, parse_digest_response(invalid_digest_response_string))
# invalid quoted-string
invalid_digest_response_string = \
'username="erik", realm="API", ' \
'nonce="the_nonce", uri="/the/uri", ' \
'response="18824d23aa8649c6231978d3e8532528", ' \
'opaque="the_opaque", ' \
'qop=auth, nc=0000000X, cnonce="the_cnonce'
self.assertEqual(None, parse_digest_response(invalid_digest_response_string))
def test_is_digest_credential(self):
header_string = \
'Digest username="erik", realm="API", ' \
'nonce="the_nonce", uri="/the/uri", ' \
'response="18824d23aa8649c6231978d3e8532528", ' \
'opaque="the_opaque", ' \
'qop=auth, nc=0000000a, cnonce="the_cnonce"'
self.assertTrue(is_digest_credential(header_string))
self.assertFalse(is_digest_credential("Basic A7F="))
def test_parse_digest_credentials(self):
header_string = \
'Digest username="erik", realm="API", ' \
'nonce="the_nonce", uri="/the/uri", ' \
'response="18824d23aa8649c6231978d3e8532528", ' \
'opaque="the_opaque", ' \
'qop=auth, nc=0000000a, cnonce="the_cnonce"'
self.assertEqual('erik', parse_digest_credentials(header_string).username)
class UtilsTests(unittest.TestCase):
def test_parse_parts_with_embedded_comma(self):
valid_parts = ('username="wikiphoto", realm="API", '
'nonce="1268201053.67:5140:070c3f060614cbe244e1a713768e0211", '
'uri="/api/for/wikiphoto/missions/missions/Oh, the Memories/", '
'response="d9fb4f9882386339931cf088c74f3942", '
'opaque="11861771750D1B343DF11FE4C223725A", '
'algorithm="MD5", cnonce="17ec1ffae9e01d125d65accef45157fa", '
'nc=00000061, qop=auth')
self.assertEquals("/api/for/wikiphoto/missions/missions/Oh, the Memories/",
parse_parts(valid_parts)['uri'])
def test_parse_parts_with_escaped_quote(self):
valid_parts = ('username="wiki\\"photo"')
self.assertEquals("wiki\"photo",
parse_parts(valid_parts)['username'])
def test_parse_parts(self):
valid_parts = ' hello = world , my = " name is sam " '
self.assertEquals({'hello': 'world', 'my': " name is sam "}, parse_parts(valid_parts))
invalid_parts = ' hello world , my = " name is sam " '
self.assertEquals(None, parse_parts(invalid_parts))
# known issue: ',' or '=' could appear in a quoted-string and would be interpreted as
# ending the part
invalid_parts = ' hello=world=goodbye , my = " name is sam " '
self.assertEquals(None, parse_parts(invalid_parts))
def test_escaped_character_state(self):
for c in 'a\\\',"= _-1#':
io = StringIO()
ecs = EscapedCharacterState(io)
self.assertTrue(ecs.character(c))
self.assertEquals(c, io.getvalue())
def test_value_leading_whitespace_state_unquoted_value(self):
io = StringIO()
vlws = ValueLeadingWhitespaceState(io)
self.assertFalse(vlws.character(' '))
self.assertFalse(vlws.character('\r'))
self.assertFalse(vlws.character('\n'))
self.assertFalse(vlws.character(chr(9)))
self.assertFalse(vlws.character(' '))
self.assertFalse(vlws.character('a'))
self.assertTrue(vlws.character(','))
self.assertEquals('a', io.getvalue())
def test_value_leading_whitespace_state_quoted_value(self):
io = StringIO()
vlws = ValueLeadingWhitespaceState(io)
self.assertFalse(vlws.character(' '))
self.assertFalse(vlws.character('"'))
self.assertFalse(vlws.character('\\'))
self.assertFalse(vlws.character('"'))
self.assertFalse(vlws.character('"'))
self.assertTrue(vlws.character(','))
self.assertEquals('"', io.getvalue())
def test_value_leading_whitespace_state_error(self):
vlws = KeyTrailingWhitespaceState()
self.assertFalse(vlws.character(' '))
self.assertRaises(ValueError, vlws.character, '<')
def test_key_trailing_whitespace_state(self):
ktws = KeyTrailingWhitespaceState()
self.assertFalse(ktws.character(' '))
self.assertFalse(ktws.character('\r'))
self.assertFalse(ktws.character('\n'))
self.assertFalse(ktws.character(chr(9)))
self.assertFalse(ktws.character(' '))
self.assertTrue(ktws.character('='))
def test_key_trailing_whitespace_state_error(self):
for c in 'a,"':
ktws = KeyTrailingWhitespaceState()
self.assertFalse(ktws.character(' '))
self.assertRaises(ValueError, ktws.character, c)
def test_quoted_key_state(self):
io = StringIO()
qks = QuotedKeyState(io)
for c in '\\"this is my string,\\" he said!':
self.assertFalse(qks.character(c))
self.assertFalse(qks.character('"'))
self.assertFalse(qks.character(' '))
self.assertFalse(qks.character('\r'))
self.assertTrue(qks.character('='))
self.assertEquals('"this is my string," he said!', io.getvalue())
def test_quoted_key_state_eof_error(self):
io = StringIO()
qks = QuotedKeyState(io)
self.assertFalse(qks.character('a'))
self.assertFalse(qks.character('"'))
self.assertFalse(qks.character(' '))
self.assertFalse(qks.character('\r'))
self.assertRaises(ValueError, qks.close)
def test_value_trailing_whitespace_state(self):
vtws = ValueTrailingWhitespaceState()
self.assertFalse(vtws.character(' '))
self.assertFalse(vtws.character('\r'))
self.assertFalse(vtws.character('\n'))
self.assertFalse(vtws.character(chr(9)))
self.assertFalse(vtws.character(' '))
self.assertTrue(vtws.character(','))
def test_value_trailing_whitespace_state_eof(self):
vtws = ValueTrailingWhitespaceState()
self.assertFalse(vtws.character(' '))
self.assertTrue(vtws.close())
def test_value_trailing_whitespace_state_error(self):
for c in 'a="':
vtws = ValueTrailingWhitespaceState()
self.assertFalse(vtws.character(' '))
self.assertRaises(ValueError, vtws.character, c)
def test_unquoted_key_state_with_whitespace(self):
io = StringIO()
uks = UnquotedKeyState(io)
for c in 'hello_world':
self.assertFalse(uks.character(c))
self.assertFalse(uks.character(' '))
self.assertFalse(uks.character('\r'))
self.assertTrue(uks.character('='))
self.assertEquals('hello_world', io.getvalue())
def test_unquoted_key_state_without_whitespace(self):
io = StringIO()
uks = UnquotedKeyState(io)
for c in 'hello_world':
self.assertFalse(uks.character(c))
self.assertTrue(uks.character('='))
self.assertEquals('hello_world', io.getvalue())
def test_unquoted_key_state_error(self):
io = StringIO()
uks = UnquotedKeyState(io)
self.assertFalse(uks.character('a'))
self.assertRaises(ValueError, uks.character, '<')
def test_quoted_value_state(self):
io = StringIO()
qvs = QuotedValueState(io)
for c in '\\"this is my string,\\" he said!':
self.assertFalse(qvs.character(c))
self.assertFalse(qvs.character('"'))
self.assertFalse(qvs.character(' '))
self.assertFalse(qvs.character('\r'))
self.assertTrue(qvs.character(','))
self.assertEquals('"this is my string," he said!', io.getvalue())
def test_quoted_value_state_eof(self):
io = StringIO()
qvs = QuotedValueState(io)
for c in '\\"this is my string,\\" he said!':
self.assertFalse(qvs.character(c))
self.assertFalse(qvs.character('"'))
self.assertTrue(qvs.close())
self.assertEquals('"this is my string," he said!', io.getvalue())
def test_quoted_value_state_error(self):
io = StringIO()
qvs = QuotedValueState(io)
for c in '\\"this is my string,\\" he said!':
self.assertFalse(qvs.character(c))
self.assertFalse(qvs.character('"'))
self.assertRaises(ValueError, qvs.character, '=')
def test_new_part_state(self):
# Try a variety of strings, both with comma and eof terminating them
for ending in (lambda s: s.character(','), lambda s: s.close()):
parts = {}
for s in ('hello=world', ' hi = bye ', ' "what?" = "\\"ok\\""'):
nps = NewPartState(parts)
for c in s:
self.assertFalse(nps.character(c))
self.assertTrue(ending(nps))
self.assertEquals(parts, {'hello': 'world',
'hi': 'bye',
'what?': '"ok"'})
def test_new_part_state_error(self):
nps = NewPartState(parts={})
self.assertRaises(ValueError, nps.character, '<')
def test_foundation_state(self):
fs = FoundationState({'default': 'value', 'hello': 'bye bye'})
for c in ' hello=world, my=turn, yes=no , one = 1, " \\"quoted\\" " = unquoted ':
self.assertFalse(fs.character(c))
fs.close()
self.assertEquals(fs.result(), {'default': 'value',
'hello': 'world',
'my': 'turn',
'yes': 'no',
'one': '1',
' "quoted" ': 'unquoted'})
def test_foundation_state_error(self):
for s in ('', ' ', 'hello', 'hello=', 'hello=world,', 'hello=world, ',
'hello=world, a'):
fs = FoundationState({'default': 'value'})
for c in s:
self.assertFalse(fs.character(c))
self.assertRaises(ValueError, fs.close)
if __name__ == '__main__':
unittest.main()
|
|
from django.core.urlresolvers import reverse
from unittest2 import skipIf
from django.test import RequestFactory
from django.test.utils import override_settings
from django.test.client import Client
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.test.api.base import BaseAPITest
from sqlshare_rest.test import missing_url
from sqlshare_rest.dao.dataset import create_dataset_from_query
from sqlshare_rest.dao.dataset import set_dataset_accounts, set_dataset_emails
from sqlshare_rest.dao.query import create_query
from sqlshare_rest.models import User, Query, DatasetSharingEmail
from sqlshare_rest.dao.user import get_user
from sqlshare_rest.dao.dataset import create_dataset_from_query
import json
import re
@skipIf(missing_url("sqlshare_view_dataset_list"), "SQLShare REST URLs not configured")
@override_settings(MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
)
class UserOverrideAPITest(BaseAPITest):
def setUp(self):
super(UserOverrideAPITest, self).setUp()
# Try to cleanup from any previous test runs...
self.remove_users = []
self.client = Client()
def test_user_api(self):
self.remove_users = []
user = "overrider"
self.remove_users.append(user)
self.remove_users.append("over2")
user_auth_headers = self.get_auth_header_for_username(user)
backend = get_backend()
user_obj = backend.get_user(user)
self._clear_override(user_obj)
url = reverse("sqlshare_view_user")
response = self.client.get(url, **user_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["username"], user)
self.assertEquals(data["schema"], user)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
response = self.client.get(url, **user_auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(data["username"], "over2")
self.assertEquals(data["schema"], "over2")
def test_dataset_api(self):
self.remove_users = []
user = "overrider2"
self.remove_users.append(user)
self.remove_users.append("over2")
user_auth_headers = self.get_auth_header_for_username(user)
backend = get_backend()
user_obj = backend.get_user(user)
self._clear_override(user_obj)
# Make sure we have the user we think...
ds_overrider_1 = create_dataset_from_query(user, "ds_overrider_3", "SELECT (1)")
url = reverse("sqlshare_view_dataset", kwargs={ 'owner': user,
'name': "ds_overrider_3"})
response = self.client.get(url, **user_auth_headers)
self.assertEquals(response.status_code, 200)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
# Now test get as someone else.
response = self.client.get(url, **user_auth_headers)
self.assertEquals(response.status_code, 403)
data = {
"sql_code": "SELECT('FAIL')",
"is_public": False,
"is_snapshot": False,
"description": "This is a test dataset",
}
json_data = json.dumps(data)
# Test the right response from the PUT
self.assertRaisesRegexp(Exception, "Owner doesn't match user: .*", self.client.put, url, data=json_data, **user_auth_headers)
# Test the right response from the PATCH
self.assertRaisesRegexp(Exception, "Owner doesn't match user: .*", self.client.patch, url, data=json_data, **user_auth_headers)
# Test the right response from the DELETE
self.assertRaisesRegexp(Exception, "Owner doesn't match user: .*", self.client.delete, url, data=json_data, **user_auth_headers)
url = reverse("sqlshare_view_download_dataset", kwargs={ 'owner': user,
'name': "ds_overrider_3"})
response = self.client.post(url, **user_auth_headers)
self.assertEquals(response.status_code, 403)
def test_dataset_list_owned(self):
self.remove_users = []
user = "overrider_owner_list"
self.remove_users.append(user)
self.remove_users.append("over2")
auth_headers = self.get_auth_header_for_username(user)
backend = get_backend()
user_obj = backend.get_user(user)
self._clear_override(user_obj)
ds_overrider_1 = create_dataset_from_query(user, "ds_overrider_list1", "SELECT (1)")
url = reverse("sqlshare_view_dataset_list")
response = self.client.get(url, **auth_headers)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 1)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
response = self.client.get(url, **auth_headers)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 0)
def test_dataset_list_shared_with(self):
self.remove_users = []
user = "overrider_owner_sharer2"
self.remove_users.append(user)
self.remove_users.append("overrider_recipient1")
self.remove_users.append("over2")
backend = get_backend()
backend.get_user(user)
user_obj = backend.get_user("overrider_recipient1")
auth_headers = self.get_auth_header_for_username("overrider_recipient1")
self._clear_override(user_obj)
ds_overrider_1 = create_dataset_from_query(user, "ds_overrider_list2", "SELECT (1)")
set_dataset_accounts(ds_overrider_1, [ "overrider_recipient1" ])
url = reverse("sqlshare_view_dataset_shared_list")
response = self.client.get(url, **auth_headers)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 1)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
response = self.client.get(url, **auth_headers)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 0)
def test_dataset_list_all(self):
self.remove_users = []
user = "overrider_owner_list_all"
self.remove_users.append(user)
self.remove_users.append("over2")
auth_headers = self.get_auth_header_for_username(user)
backend = get_backend()
user_obj = backend.get_user(user)
self._clear_override(user_obj)
ds_overrider_1 = create_dataset_from_query(user, "ds_overrider_list3", "SELECT (1)")
url = reverse("sqlshare_view_dataset_all_list")
response = self.client.get(url, **auth_headers)
data = json.loads(response.content.decode("utf-8"))
# Other tests make datasets public, so we can't just count on a static number
actual_owner_count = len(data)
self.assertTrue(actual_owner_count >= 1)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
response = self.client.get(url, **auth_headers)
data = json.loads(response.content.decode("utf-8"))
# This override user should have 1 fewer than the owner
self.assertEquals(len(data), actual_owner_count-1)
def test_dataset_list_tagged(self):
self.remove_users = []
user = "overrider_owner_list_tagged"
self.remove_users.append(user)
self.remove_users.append("over2")
auth_headers = self.get_auth_header_for_username(user)
backend = get_backend()
user_obj = backend.get_user(user)
self._clear_override(user_obj)
ds_overrider_1 = create_dataset_from_query(user, "ds_overrider_list4", "SELECT (1)")
tag_url = reverse("sqlshare_view_dataset_tags", kwargs={ 'owner': user, 'name': "ds_overrider_list4"})
data = [ { "name": user, "tags": [ "tag1", "test_override" ] } ]
self.client.put(tag_url, data=json.dumps(data), **auth_headers)
url = reverse("sqlshare_view_dataset_tagged_list", kwargs={"tag": "test_override" })
response = self.client.get(url, **auth_headers)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 1)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
response = self.client.get(url, **auth_headers)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 0)
def test_start_query(self):
owner = "override_query_user1"
self.remove_users.append(owner)
self.remove_users.append("user2")
backend = get_backend()
user_obj = backend.get_user(owner)
self._clear_override(user_obj)
post_url = reverse("sqlshare_view_query_list")
auth_headers = self.get_auth_header_for_username(owner)
data = {
"sql": "select(1)"
}
response = self.client.post(post_url, data=json.dumps(data), content_type='application/json', **auth_headers)
self.assertEquals(response.status_code, 202)
values = json.loads(response.content.decode("utf-8"))
self.assertEquals(values["error"], None)
self.assertEquals(values["sql_code"], "select(1)")
url = values["url"]
self.assertTrue(re.match("/v3/db/query/[\d]+$", url))
response = self.client.get(url, **auth_headers)
self.assertEquals(response.status_code, 202)
values = json.loads(response.content.decode("utf-8"))
user2 = backend.get_user("over2")
self._override(user_obj, user2)
response = self.client.get(url, **auth_headers)
self.assertEquals(response.status_code, 403)
def test_dataset_tags(self):
self.remove_users = []
user = "overrider3"
self.remove_users.append(user)
self.remove_users.append("over2")
user_auth_headers = self.get_auth_header_for_username(user)
backend = get_backend()
user_obj = backend.get_user(user)
self._clear_override(user_obj)
# Make sure we have the user we think...
ds_overrider_1 = create_dataset_from_query(user, "ds_overrider_1", "SELECT (1)")
url = reverse("sqlshare_view_dataset_tags", kwargs={ 'owner': user,
'name': "ds_overrider_1"})
response = self.client.get(url, **user_auth_headers)
self.assertEquals(response.status_code, 200)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
# Now test get as someone else.
response = self.client.get(url, **user_auth_headers)
self.assertEquals(response.status_code, 403)
def test_file_upload_init(self):
self.remove_users = []
user = "overrider4"
self.remove_users.append(user)
self.remove_users.append("over2")
auth_headers = self.get_auth_header_for_username(user)
data1 = "col1,col2,XXcol3\na,1,2\nb,2,3\nc,3,4\n"
init_url = reverse("sqlshare_view_file_upload_init")
backend = get_backend()
user_obj = backend.get_user(user)
# Do the initial file upload as the other user, make sure actual user
# can't see the parser values.
user2 = backend.get_user("over2")
self._override(user_obj, user2)
response1 = self.client.post(init_url, data=data1, content_type="text/plain", **auth_headers)
self.assertEquals(response1.status_code, 201)
body = response1.content.decode("utf-8")
re.match("^\d+$", body)
upload_id = int(body)
parser_url = reverse("sqlshare_view_file_parser", kwargs={ "id":upload_id })
response2 = self.client.get(parser_url, **auth_headers)
self.assertEquals(response2.status_code, 200)
self._clear_override(user_obj)
parser_url = reverse("sqlshare_view_file_parser", kwargs={ "id":upload_id })
response2 = self.client.get(parser_url, **auth_headers)
self.assertEquals(response2.status_code, 403)
def test_file_upload_process(self):
self.remove_users = []
user = "overrider5"
self.remove_users.append(user)
self.remove_users.append("over2")
auth_headers = self.get_auth_header_for_username(user)
data1 = "col1,col2,XXcol3\na,1,2\nb,2,3\nc,3,4\n"
data2 = "z,999,2\ny,2,3\nx,30,41"
init_url = reverse("sqlshare_view_file_upload_init")
backend = get_backend()
user_obj = backend.get_user(user)
# Do the initial file upload as the other user, make sure actual user
# can't upload more data.
user2 = backend.get_user("over2")
self._override(user_obj, user2)
response1 = self.client.post(init_url, data=data1, content_type="text/plain", **auth_headers)
self.assertEquals(response1.status_code, 201)
body = response1.content.decode("utf-8")
re.match("^\d+$", body)
upload_id = int(body)
parser_url = reverse("sqlshare_view_file_parser", kwargs={ "id":upload_id })
response2 = self.client.get(parser_url, **auth_headers)
self.assertEquals(response2.status_code, 200)
parser_url = reverse("sqlshare_view_file_parser", kwargs={ "id":upload_id })
response2 = self.client.get(parser_url, **auth_headers)
self.assertEquals(response2.status_code, 200)
self._clear_override(user_obj)
upload_url = reverse("sqlshare_view_file_upload", kwargs={ "id":upload_id })
# Send the rest of the file:
response6 = self.client.post(upload_url, data=data2, content_type="application/json", **auth_headers)
self.assertEquals(response6.status_code, 403)
self._override(user_obj, user2)
response6 = self.client.post(upload_url, data=data2, content_type="application/json", **auth_headers)
self.assertEquals(response6.status_code, 200)
# Make sure the original user can't finalize the dataset
self._clear_override(user_obj)
finalize_url = reverse("sqlshare_view_upload_finalize", kwargs={ "id": upload_id })
finalize_data = json.dumps({ "dataset_name": "test_dataset1",
"description": "Just a test description"
})
# Make sure no one else can do it!
response8 = self.client.post(finalize_url, data=finalize_data, content_type="application/json", **auth_headers)
self.assertEquals(response8.status_code, 403)
self._override(user_obj, user2)
response8 = self.client.post(finalize_url, data=finalize_data, content_type="application/json", **auth_headers)
self.assertEquals(response8.status_code, 202)
def test_query_list(self):
self.remove_users = []
user = "overrider6"
self.remove_users.append(user)
self.remove_users.append("over2")
auth_headers = self.get_auth_header_for_username(user)
Query.objects.all().delete()
backend = get_backend()
user_obj = backend.get_user(user)
self._clear_override(user_obj)
query1 = create_query(user, "SELECT (1)")
query2 = create_query(user, "SELECT (1)")
query3 = create_query(user, "SELECT (1)")
url = reverse("sqlshare_view_query_list")
auth_headers = self.get_auth_header_for_username(user)
response = self.client.get(url, **auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 3)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
response = self.client.get(url, **auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 0)
def test_query_post(self):
self.remove_users = []
user = "overrider7"
self.remove_users.append(user)
self.remove_users.append("over2")
auth_headers = self.get_auth_header_for_username(user)
url = reverse("sqlshare_view_query_list")
Query.objects.all().delete()
backend = get_backend()
user_obj = backend.get_user(user)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
# make that query as the override user:
data = {
"sql": "select(1)"
}
response = self.client.post(url, data=json.dumps(data), content_type='application/json', **auth_headers)
self.assertEquals(response.status_code, 202)
# find the query as the override...
response = self.client.get(url, **auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 1)
self._clear_override(user_obj)
# make sure the original user can't see the query
response = self.client.get(url, **auth_headers)
self.assertEquals(response.status_code, 200)
data = json.loads(response.content.decode("utf-8"))
self.assertEquals(len(data), 0)
def test_dataset_permissions(self):
self.remove_users = []
user = "overrider8"
self.remove_users.append(user)
self.remove_users.append("over2")
user_auth_headers = self.get_auth_header_for_username(user)
backend = get_backend()
user_obj = backend.get_user(user)
self._clear_override(user_obj)
# Make sure we have the user we think...
ds_overrider_1 = create_dataset_from_query(user, "ds_overrider_2", "SELECT (1)")
url = reverse("sqlshare_view_dataset_permissions", kwargs={ 'owner': user,
'name': "ds_overrider_2"})
response = self.client.get(url, **user_auth_headers)
self.assertEquals(response.status_code, 200)
user2 = backend.get_user("over2")
self._override(user_obj, user2)
# Now test get as someone else.
response = self.client.get(url, **user_auth_headers)
self.assertEquals(response.status_code, 403)
def test_access_tokens(self):
self.remove_users = []
user = "overrider_owner_sharer1"
self.remove_users.append(user)
self.remove_users.append("override_3rd_party")
self.remove_users.append("overrider_recipient2")
self.remove_users.append("over3")
backend = get_backend()
backend.get_user(user)
user_obj = backend.get_user("overrider_recipient2")
auth_headers = self.get_auth_header_for_username("overrider_recipient2")
self._clear_override(user_obj)
ds_overrider_1 = create_dataset_from_query("override_3rd_party", "ds_overrider_access_token", "SELECT (1)")
set_dataset_emails(ds_overrider_1, [ "[email protected]" ])
ds_overrider_1.is_shared = True
ds_overrider_1.save()
sharing = DatasetSharingEmail.objects.filter(dataset=ds_overrider_1)[0]
email = sharing.email
access_token1 = sharing.access_token
user2 = backend.get_user("over3")
self._override(user_obj, user2)
# Get the access token url while overriden, and make sure the original
# user doesn't have access:
token1_url = reverse("sqlshare_token_access", kwargs={"token": access_token1})
response = self.client.post(token1_url, data={}, **auth_headers)
self.assertEquals(response.status_code, 200)
ds_url = reverse("sqlshare_view_dataset", kwargs={"owner": "override_3rd_party", "name": "ds_overrider_access_token"})
response = self.client.get(ds_url, **auth_headers)
self.assertEquals(response.status_code, 200)
self._clear_override(user_obj)
response = self.client.get(ds_url, **auth_headers)
self.assertEquals(response.status_code, 403)
def _override(self, user1, user2):
user1.override_as = user2
user1.save()
def _clear_override(self, user):
user.override_as = None
user.save()
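# NOTE: the two helpers above drive the tests in this class: _override makes the
# backend act as user2 for subsequent requests by setting what appears to be an
# `override_as` relation on the user model, and _clear_override restores the
# original identity.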
|
|
"""This script implements the main application logic for Doctrine."""
##==============================================================#
## DEVELOPED 2015, REVISED 2015, Jeff Rimko. #
##==============================================================#
##==============================================================#
## SECTION: Imports #
##==============================================================#
import fnmatch
import os
import shutil
import sys
import tempfile
import time
import uuid
import webbrowser
import zipfile
import os.path as op
from ctypes import *
import PySide
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtWebKit import *
from asciidocapi import AsciiDocAPI
import doctview
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
# Set up the Asciidoc environment.
os.environ['ASCIIDOC_PY'] = op.join(op.dirname(__file__), r"asciidoc\asciidoc.py")
if getattr(sys, 'frozen', None):
os.environ['ASCIIDOC_PY'] = op.normpath(op.join(sys._MEIPASS, r"asciidoc\asciidoc.py"))
# Splash displayed at startup.
SPLASH = r"static\splash.html"
if getattr(sys, 'frozen', None):
SPLASH = op.join(sys._MEIPASS, r"static\splash.html")
SPLASH = QUrl().fromLocalFile(op.abspath(SPLASH))
# Render page used by the application.
RENDER = r"static\render.html"
if getattr(sys, 'frozen', None):
RENDER = op.join(sys._MEIPASS, r"static\render.html")
RENDER = QUrl().fromLocalFile(op.abspath(RENDER))
# Prefix of the generated HTML document.
DOCPRE = "__doctrine-"
# Extension of the generated HTML document.
DOCEXT = ".html"
# URL prefix of a local file.
URLFILE = "file:///"
# Name of archive info file.
ARCINFO = "__archive_info__.txt"
# Name and version of the application.
NAMEVER = "Doctrine 0.1.0-alpha"
FILETYPES = dict()
FILETYPES['AsciiDoc'] = ["*.txt", "*.ad", "*.adoc", "*.asciidoc"]
FILETYPES['Zip File'] = ["*.zip"]
##==============================================================#
## SECTION: Class Definitions #
##==============================================================#
class DoctrineApp(QApplication):
"""The main Doctrine application."""
def __init__(self, *args, **kwargs):
"""Initializes the application."""
super(DoctrineApp, self).__init__(*args, **kwargs)
self.aboutToQuit.connect(self._handle_quit)
self._init_ui()
self.deldoc = False
self.docpath = None
#: Path to the temporary rendered document.
self.tmppath = None
#: Path to a temporary directory, if needed.
self.tmpdir = None
def _init_ui(self):
"""Initializes the UI."""
# Set up palette.
pal = self.palette()
col = pal.color(QPalette.Highlight)
pal.setColor(QPalette.Inactive, QPalette.Highlight, col)
col = pal.color(QPalette.HighlightedText)
pal.setColor(QPalette.Inactive, QPalette.HighlightedText, col)
self.setPalette(pal)
# Set up basic UI elements.
self.mainwin = doctview.MainWindow()
self.mainwin.setWindowTitle(NAMEVER)
self.mainwin.actn_reload.setDisabled(True)
self.mainwin.actn_display.setDisabled(True)
self.mainwin.menu_navi.setDisabled(True)
# Set up event handling.
self.mainwin.actn_open.triggered.connect(self._handle_open)
self.mainwin.actn_quit.triggered.connect(self.quit)
self.mainwin.actn_reload.triggered.connect(self._handle_reload)
self.mainwin.actn_frwd.triggered.connect(self._handle_nav_forward)
self.mainwin.actn_back.triggered.connect(self._handle_nav_backward)
self.mainwin.actn_display.triggered.connect(self._handle_display)
self.mainwin.webview.view.linkClicked.connect(self._handle_link)
self.mainwin.webview.view.setAcceptDrops(True)
self.mainwin.webview.view.dragEnterEvent = self._handle_drag
self.mainwin.webview.view.dropEvent = self._handle_drop
self.mainwin.find_dlog.find_btn.clicked.connect(self._handle_find_next)
self.mainwin.find_dlog.prev_btn.clicked.connect(self._handle_find_prev)
# Set up how web links are handled.
self.mainwin.webview.view.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
# Set up keyboard shortcuts.
scut_reload = QShortcut(self.mainwin)
scut_reload.setKey(QKeySequence("F5"))
scut_reload.activated.connect(self._handle_reload)
scut_find1 = QShortcut(self.mainwin)
scut_find1.setKey(QKeySequence("F3"))
scut_find1.activated.connect(self._display_find)
scut_find2 = QShortcut(self.mainwin)
scut_find2.setKey(QKeySequence("Ctrl+F"))
scut_find2.activated.connect(self._display_find)
scut_find_next = QShortcut(self.mainwin)
scut_find_next.setKey(QKeySequence("Ctrl+N"))
scut_find_next.activated.connect(self._handle_find_next)
scut_find_prev = QShortcut(self.mainwin)
scut_find_prev.setKey(QKeySequence("Ctrl+P"))
scut_find_prev.activated.connect(self._handle_find_prev)
# NOTE: Use to create custom context menu.
self.mainwin.webview.view.contextMenuEvent = self._handle_context
self.mainwin.webview.view.mouseReleaseEvent = self._handle_mouse
def _handle_nav_forward(self):
"""Navigates the web view forward."""
self.mainwin.webview.view.page().triggerAction(QWebPage.Forward)
def _handle_nav_backward(self):
"""Navigates the web view back."""
self.mainwin.webview.view.page().triggerAction(QWebPage.Back)
def _handle_find_next(self, event=None):
"""Find the next occurrence of the phrase in the find dialog."""
self._find()
def _handle_find_prev(self, event=None):
"""Find the previous occurrence of the phrase in the find dialog."""
options = QWebPage.FindBackward
self._find(options)
def _find(self, options=0):
"""Find the phrase in the find dialog."""
text = self.mainwin.find_dlog.find_edit.text()
if self.mainwin.find_dlog.case_cb.checkState():
options |= QWebPage.FindCaseSensitively
self.mainwin.webview.view.findText(text, options=options)
def _handle_mouse(self, event=None):
"""Handles mouse release events."""
if event.button() == Qt.MouseButton.XButton1:
self._handle_nav_backward()
return
if event.button() == Qt.MouseButton.XButton2:
self._handle_nav_forward()
return
return QWebView.mouseReleaseEvent(self.mainwin.webview.view, event)
def _handle_context(self, event=None):
"""Handles context menu creation events."""
if self.docpath:
menu = QMenu()
menu.addAction(self.mainwin.webview.style().standardIcon(QStyle.SP_BrowserReload), "Reload", self._handle_reload)
menu.exec_(event.globalPos())
def _handle_drag(self, event=None):
"""Handles drag enter events."""
event.accept()
def _handle_drop(self, event=None):
"""Handles drag-and-drop events."""
if event.mimeData().hasUrls():
self._load_doc(str(event.mimeData().urls()[0].toLocalFile()))
def _handle_quit(self):
"""Handles quitting the application."""
self._delete_tmppath()
self._delete_tmpdir()
def _handle_display(self):
"""Handles displaying the document in the web view."""
if not self.docpath:
return
if not self.tmppath:
self._load_doc(reload_=True)
if not self.tmppath:
return
webbrowser.open(self.tmppath)
def _handle_reload(self):
"""Handles reloading the document."""
if self.docpath:
self._load_doc(reload_=True)
def _display_find(self):
"""Displays the find dialog."""
self.mainwin.find_dlog.show()
self.mainwin.find_dlog.activateWindow()
self.mainwin.find_dlog.find_edit.setFocus()
def _handle_link(self, url=None):
"""Handles link clicked events."""
# Open URLs to webpages with default browser.
if is_webpage(url):
webbrowser.open(str(url.toString()))
return
# Open links to Asciidoc files in Doctrine.
if is_asciidoc(url2path(url)):
self._load_doc(url2path(url))
return
# Open the URL in the webview.
self.mainwin.webview.view.load(url)
def _handle_open(self):
"""Handles open file menu events."""
path = self.mainwin.show_open_file(format_filter(FILETYPES))
self._load_doc(path)
def _load_doc(self, path="", reload_=False):
"""Handles loading the document to view."""
# Delete existing temp files.
self._delete_tmppath()
self._delete_tmpdir()
# If not reloading the previous document, clear out tmppath.
if not reload_:
self.tmppath = None
self.tmpdir = None
# Set the doc path.
prev = self.docpath
if path:
self.docpath = path
if not self.docpath:
return
self.docpath = op.abspath(self.docpath)
self.setOverrideCursor(QCursor(Qt.WaitCursor))
# Attempt to prepare the document for display.
url = ""
if self.docpath.endswith(".txt"):
url = self._prep_text()
elif self.docpath.endswith(".zip"):
url = self._prep_archive()
elif self.docpath.endswith(".csv"):
url = self._prep_csv()
# NOTE: URL is populated only if ready to display output.
if url:
self.mainwin.webview.view.load(url)
self.mainwin.actn_reload.setDisabled(False)
self.mainwin.actn_display.setDisabled(False)
self.mainwin.menu_navi.setDisabled(False)
self.mainwin.setWindowTitle("%s (%s) - %s" % (
op.basename(self.docpath),
op.dirname(self.docpath),
NAMEVER))
elif prev:
self.docpath = prev
self.restoreOverrideCursor()
def _prep_text(self):
"""Prepares a text document for viewing."""
if not self.docpath:
return
if not self.tmppath:
self.tmppath = getuniqname(op.dirname(self.docpath), DOCEXT, DOCPRE)
try:
AsciiDocAPI().execute(self.docpath, self.tmppath)
except:
self.restoreOverrideCursor()
err_msg = str(sys.exc_info()[0])
err_msg += "\n"
err_msg += str(sys.exc_info()[1])
self.mainwin.show_error_msg(err_msg)
return QUrl().fromLocalFile(self.tmppath)
def _prep_archive(self):
"""Prepares an archive for viewing."""
if not self.docpath:
return
if not self.tmpdir:
self.tmpdir = tempfile.mkdtemp()
if self.tmpdir and not op.isdir(self.tmpdir):
os.makedirs(self.tmpdir)
zfile = zipfile.ZipFile(self.docpath)
zfile.extractall(self.tmpdir)
path = ""
# Attempt to locate archive info file.
arcinfo = op.join(self.tmpdir, ARCINFO)
if op.exists(arcinfo):
path = arcinfo
# If no archive info file found, attempt to locate any asciidoc text file.
if not path:
txts = findfile("*.txt", self.tmpdir)
if txts:
path = txts[0]
# If no text file path was found, bail.
if not path:
return
if not self.tmppath:
self.tmppath = getuniqname(op.dirname(path), DOCEXT, DOCPRE)
AsciiDocAPI().execute(path, self.tmppath)
return QUrl().fromLocalFile(self.tmppath)
def _prep_csv(self):
"""Prepares a CSV file for viewing."""
if not self.docpath:
return
if not self.tmppath:
self.tmppath = getuniqname(op.dirname(self.docpath), DOCEXT, DOCPRE)
path = getuniqname(op.dirname(self.docpath), ".txt", "__temp-")
with open(path, "w") as f:
f.write('[format="csv"]\n')
f.write("|===\n")
f.write("include::" + self.docpath + "[]\n")
f.write("|===\n")
AsciiDocAPI().execute(path, self.tmppath)
os.remove(path)
return QUrl().fromLocalFile(self.tmppath)
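# Illustrative sketch (not executed): for a hypothetical file "data.csv",
# _prep_csv writes a temporary AsciiDoc wrapper roughly like the following
# before rendering it, where the include target is really self.docpath:
#
#   [format="csv"]
#   |===
#   include::/abs/path/to/data.csv[]
#   |===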
def _delete_tmppath(self):
"""Deletes the rendered HTML."""
if not self.tmppath:
return
retries = 3
while retries:
if not op.exists(self.tmppath):
return
try:
os.remove(self.tmppath)
except:
time.sleep(0.1)
retries -= 1
def _delete_tmpdir(self):
"""Deletes the temporary directory."""
if not self.tmpdir:
return
if op.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def show_main(self):
"""Shows the main view of the application."""
self.mainwin.show()
self.mainwin.webview.view.load(SPLASH)
def run_loop(self):
"""Runs the main loop of the application."""
if self.docpath:
self._load_doc()
self.exec_()
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def getuniqname(base, ext, pre=""):
"""Returns a unique random file name at the given base directory. Does not
create a file."""
while True:
uniq = op.join(base, pre + "tmp" + str(uuid.uuid4())[:6] + ext)
if not os.path.exists(uniq):
break
return op.normpath(uniq)
def is_webpage(url):
"""Returns true if the given URL is for a webpage (rather than a local file)."""
# Handle types.
url = url2str(url)
if type(url) != str:
return False
# Return true if URL is external webpage, false otherwise.
if url.startswith("http:") or url.startswith("https:"):
return True
return False
def findfile(pattern, path):
"""Finds a file matching the given pattern in the given path. Taken from
`http://stackoverflow.com/questions/1724693/find-a-file-in-python`."""
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(op.join(root, name))
return result
def url2path(url):
"""Returns the normalized path of the given URL."""
url = url2str(url)
if url.startswith(URLFILE):
url = url[len(URLFILE):]
return op.normpath(url)
def url2str(url):
"""Returns given URL as a string."""
if type(url) == PySide.QtCore.QUrl:
url = str(url.toString())
return url
def is_asciidoc(path):
"""Returns true if the given path is an Asciidoc file."""
# NOTE: Only checking the extension for now.
if path.endswith(".txt"):
return True
return False
def format_filter(filetypes):
"""Returns a filetype filter formatted for the Open File prompt."""
filt = ""
for t in sorted(filetypes, key=lambda key: filetypes[key]):
filt += "%s (" % (t)
filt += " ".join(e for e in filetypes[t])
filt += ");;"
return filt.strip(";;")
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == '__main__':
# Show the main application.
app = DoctrineApp(sys.argv)
if len(sys.argv) > 1 and op.isfile(sys.argv[1]):
app.docpath = str(sys.argv[1])
app.show_main()
app.run_loop()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009,2010, Oracle and/or its affiliates. All rights reserved.
# Use is subject to license terms. (See COPYING)
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# There are special exceptions to the terms and conditions of the GNU
# General Public License as it is applied to this software. View the
# full text of the exception in file EXCEPTIONS-CLIENT in the directory
# of this software distribution or see the FOSS License Exception at
# www.mysql.com.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Script for running unittests
unittests.py launches all or selected unittests.
Examples:
Setting the MySQL account for running tests
shell> python unittests.py -uroot -D dbtests
Executing only the cursor tests
shell> python unittests.py -t cursor
unittests.py has exit status 0 when the tests ran successfully, 1 otherwise.
"""
import sys
import os
import tempfile
import threading
import unittest
import logging
from optparse import OptionParser
if sys.version_info >= (2,4) and sys.version_info < (3,0):
sys.path = ['python2/'] + sys.path
elif sys.version_info >= (3,1):
sys.path = ['python3/'] + sys.path
else:
raise RuntimeError("Python v%d.%d is not supported" %\
sys.version_info[0:2])
import tests
from tests import mysqld
logger = logging.getLogger(tests.LOGGER_NAME)
MY_CNF = """
# MySQL option file for MySQL Connector/Python tests
[mysqld]
basedir = %(mysqld_basedir)s
datadir = %(mysqld_datadir)s
port = %(mysqld_port)d
socket = %(mysqld_socket)s
bind_address = %(mysqld_bind_address)s
server_id = 19771406
sql_mode = ""
default_time_zone = +00:00
log-error = myconnpy_mysqld.err
log-bin = myconnpy_bin
"""
def _add_options(p):
p.add_option('-t','--test', dest='testcase', metavar='NAME',
help='Tests to execute, one of %s' % tests.get_test_names())
p.add_option('-l','--log', dest='logfile', metavar='NAME',
default=None,
help='Log file location (if not given, logging is disabled)')
p.add_option('','--force', dest='force', action="store_true",
default=False,
help='Remove previous MySQL test installation.')
p.add_option('','--keep', dest='keep', action="store_true",
default=False,
help='Keep MySQL installation (i.e. for debugging)')
p.add_option('','--debug', dest='debug', action="store_true",
default=False,
help='Show/Log debugging messages')
p.add_option('','--verbosity', dest='verbosity', metavar='NUMBER',
default='0', type="int",
help='Verbosity of unittests (default 0)')
p.add_option('','--mysql-basedir', dest='mysql_basedir',
metavar='NAME', default='/usr/local/mysql',
help='Where MySQL is installed. This is used to bootstrap and '\
'run a MySQL server which is used for unittesting only.')
p.add_option('','--mysql-topdir', dest='mysql_topdir',
metavar='NAME',
default=os.path.join(os.path.dirname(os.path.abspath(__file__)),
'mysql_myconnpy'),
help='Where to bootstrap the new MySQL instance for testing. '\
'Defaults to current ./mysql_myconnpy')
p.add_option('','--bind-address', dest='bind_address', metavar='NAME',
default='127.0.0.1',
help='IP address to bind to')
p.add_option('-P','--port', dest='port', metavar='NUMBER',
default='33770', type="int",
help='Port to use for TCP/IP connections.')
def _set_config(options, unix_socket=None):
if options.bind_address:
tests.MYSQL_CONFIG['host'] = options.bind_address
if options.port:
tests.MYSQL_CONFIG['port'] = options.port
if unix_socket:
tests.MYSQL_CONFIG['unix_socket'] = unix_socket
tests.MYSQL_CONFIG['user'] = 'root'
tests.MYSQL_CONFIG['password'] = ''
tests.MYSQL_CONFIG['database'] = 'myconnpy'
def _show_help(msg=None,parser=None,exit=0):
tests.printmsg(msg)
if parser is not None:
parser.print_help()
if exit > -1:
sys.exit(exit)
def main():
usage = 'usage: %prog [options]'
parser = OptionParser(usage=usage)
_add_options(parser)
# Set options
(options, args) = parser.parse_args()
option_file = os.path.join(options.mysql_topdir,'myconnpy_my.cnf')
unix_socket = os.path.join(options.mysql_topdir,'myconnpy_mysql.sock')
_set_config(options, unix_socket=unix_socket)
# Init the MySQL Server object
mysql_server = mysqld.MySQLInit(options.mysql_basedir,
options.mysql_topdir,
MY_CNF,
option_file,
options.bind_address,
options.port,
unix_socket)
mysql_server._debug = options.debug
# Force removal of previous test data
if options.force is True:
mysql_server.remove()
# Which tests cases to run
if options.testcase is not None:
if options.testcase in tests.get_test_names():
testcases = [ 'tests.test_%s' % options.testcase ]
else:
msg = "Test case is not one of %s" % tests.get_test_names()
_show_help(msg=msg,parser=parser,exit=1)
else:
testcases = tests.active_testcases
# Enabling logging
formatter = logging.Formatter("%(asctime)s [%(name)s:%(levelname)s] %(message)s")
myconnpy_logger = logging.getLogger('myconnpy')
fh = None
if options.logfile is not None:
fh = logging.FileHandler(options.logfile)
else:
fh = logging.StreamHandler()
fh.setFormatter(formatter)
logger.addHandler(fh)
if options.debug is True:
logger.setLevel(logging.DEBUG)
myconnpy_logger.setLevel(logging.DEBUG)
else:
myconnpy_logger.setLevel(logging.INFO)
myconnpy_logger.addHandler(fh)
logger.info("MySQL Connector/Python unittest started")
# Bootstrap and start a MySQL server
mysql_server.bootstrap()
mysql_server.start()
# Run test cases
suite = unittest.TestLoader().loadTestsFromNames(testcases)
result = unittest.TextTestRunner(verbosity=options.verbosity).run(suite)
txt = ""
if not result.wasSuccessful():
txt = "not "
logger.info("MySQL Connector/Python unittests were %ssuccessful" % txt)
# Clean up
mysql_server.stop()
if options.keep is not True:
mysql_server.remove()
# Return result of tests as exit code
sys.exit(not result.wasSuccessful())
if __name__ == '__main__':
main()
|
|
# pylint: disable=W0611
'''
Utils
=====
.. versionchanged:: 1.6.0
OrderedDict class has been removed. Use the collections.OrderedDict.
'''
__all__ = ('intersection', 'difference', 'strtotuple',
'get_color_from_hex', 'get_hex_from_color', 'get_random_color',
'is_color_transparent', 'boundary',
'deprecated', 'SafeList',
'interpolate', 'QueryDict',
'platform', 'escape_markup', 'reify')
from os import environ
from sys import platform as _sys_platform
from re import match, split
try:
from UserDict import UserDict
from UserDict import DictMixin
except ImportError:
from collections import UserDict
from collections import MutableMapping as DictMixin
_platform_android = None
_platform_ios = None
def boundary(value, minvalue, maxvalue):
'''Limit a value between a minvalue and maxvalue'''
return min(max(value, minvalue), maxvalue)
def intersection(set1, set2):
'''Return the intersection of two lists'''
return [s for s in set1 if s in set2]
def difference(set1, set2):
'''Return the difference of two lists'''
return [s for s in set1 if s not in set2]
def interpolate(value_from, value_to, step=10):
'''Interpolate a value towards another. Can be useful for smoothing a transition.
For example::
# instead of setting directly
self.pos = pos
# use interpolate, and you'll have a nice transition
self.pos = interpolate(self.pos, new_pos)
.. warning::
This interpolation works only on lists/tuples/doubles with the same
dimension. No check is done if the dimensions are not the same.
'''
if type(value_from) in (list, tuple):
out = []
for x, y in zip(value_from, value_to):
out.append(interpolate(x, y, step))
return out
else:
return value_from + (value_to - value_from) / float(step)
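# Illustrative sketch (not executed): with the default step of 10, each call
# moves one tenth of the remaining distance towards the target, e.g.
#
#   >>> interpolate(0., 100.)
#   10.0
#   >>> interpolate(10., 100.)
#   19.0
#
# so repeated calls approach (but never quite reach) the target value.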
def strtotuple(s):
'''Convert a tuple string into a tuple,
with some security checks. Designed to be used
with the eval() function::
a = (12, 54, 68)
b = str(a) # return '(12, 54, 68)'
c = strtotuple(b) # return (12, 54, 68)
'''
# security
if not match('^[,.0-9 ()\[\]]*$', s):
raise Exception('Invalid characters in string for tuple conversion')
# fast syntax check
if s.count('(') != s.count(')'):
raise Exception('Invalid count of ( and )')
if s.count('[') != s.count(']'):
raise Exception('Invalid count of [ and ]')
r = eval(s)
if type(r) not in (list, tuple):
raise Exception('Conversion failed')
return r
def get_color_from_hex(s):
'''Transform from hex string color to kivy color'''
if s.startswith('#'):
return get_color_from_hex(s[1:])
value = [int(x, 16) / 255.
for x in split('([0-9a-f]{2})', s.lower()) if x != '']
if len(value) == 3:
value.append(1)
return value
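# Illustrative sketch (not executed): a 6-digit hex string gets an alpha of 1
# appended, while an 8-digit string keeps its own alpha, e.g.
#
#   >>> get_color_from_hex('#00ff00')
#   [0.0, 1.0, 0.0, 1]
#   >>> get_color_from_hex('#3fc4e57f')
#   [0.24705882352941178, 0.7686274509803922, 0.8980392156862745, 0.4980392156862745]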
def get_hex_from_color(color):
'''Transform from kivy color to hex::
>>> get_hex_from_color((0, 1, 0))
'#00ff00'
>>> get_hex_from_color((.25, .77, .90, .5))
'#3fc4e57f'
.. versionadded:: 1.5.0
'''
return '#' + ''.join(['{0:02x}'.format(int(x * 255)) for x in color])
def get_random_color(alpha=1.0):
''' Returns a random color (4 tuple)
:Parameters:
`alpha` : float, default to 1.0
if alpha == 'random' a random alpha value is generated
'''
from random import random
if alpha == 'random':
return [random(), random(), random(), random()]
else:
return [random(), random(), random(), alpha]
def is_color_transparent(c):
'''Return true if alpha channel is 0'''
if len(c) < 4:
return False
if float(c[3]) == 0.:
return True
return False
DEPRECATED_CALLERS = []
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted the first time
the function is used.'''
import inspect
import functools
@functools.wraps(func)
def new_func(*args, **kwargs):
file, line, caller = inspect.stack()[1][1:4]
caller_id = "%s:%s:%s" % (file, line, caller)
# We want to print deprecated warnings only once:
if caller_id not in DEPRECATED_CALLERS:
DEPRECATED_CALLERS.append(caller_id)
warning = (
'Call to deprecated function %s in %s line %d.'
'Called from %s line %d'
' by %s().') % (
func.__name__,
func.__code__.co_filename,
func.__code__.co_firstlineno + 1,
file, line, caller)
from kivy.logger import Logger
Logger.warn(warning)
if func.__doc__:
Logger.warn(func.__doc__)
return func(*args, **kwargs)
return new_func
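# Illustrative sketch (not executed): the decorator is applied like any other,
# e.g.
#
#   @deprecated
#   def old_api():
#       '''Use new_api() instead.'''
#       return new_api()
#
# The first call from a given file/line logs a deprecation warning (and the
# docstring, if any); later calls from the same site are silent. `old_api` and
# `new_api` are hypothetical names used only for this example.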
class SafeList(list):
'''List with clear() method
.. warning::
Using the iterate() function will decrease your performance.
'''
def clear(self):
del self[:]
@deprecated
def iterate(self, reverse=False):
if reverse:
return reversed(iter(self))
return iter(self)
class QueryDict(dict):
'''QueryDict is a dict() that can be queried with dot notation.
.. versionadded:: 1.0.4
::
d = QueryDict()
# create a key named toto, with the value 1
d.toto = 1
# it's the same as
d['toto'] = 1
'''
def __getattr__(self, attr):
try:
return self.__getitem__(attr)
except KeyError:
return super(QueryDict, self).__getattr__(attr)
def __setattr__(self, attr, value):
self.__setitem__(attr, value)
def format_bytes_to_human(size, precision=2):
'''Format a bytes number to a human-readable size (B, KB, MB...)
.. versionadded:: 1.0.8
:Parameters:
`size`: int
Number of bytes to format
`precision`: int
Number of digits after the decimal point
Examples::
>>> format_bytes_to_human(6463)
'6.31 KB'
>>> format_bytes_to_human(646368746541)
'601.98 GB'
'''
size = int(size)
fmt = '%%1.%df %%s' % precision
for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
return fmt % (size, unit)
size /= 1024.0
class Platform(object):
# refactored to class to allow module function to be replaced
# with module variable
_platform = None
@deprecated
def __call__(self):
return self._get_platform()
def __eq__(self, other):
return other == self._get_platform()
def __ne__(self, other):
return other != self._get_platform()
def __str__(self):
return self._get_platform()
def __repr__(self):
return 'platform name: \'{platform}\' from: \n{instance}'.format(
platform=self._get_platform(),
instance=super(Platform, self).__repr__()
)
def __hash__(self):
return self._get_platform().__hash__()
def _get_platform(self):
if self._platform is not None:
return self._platform
global _platform_ios, _platform_android
if _platform_android is None:
# ANDROID_ARGUMENT and ANDROID_PRIVATE are 2 environment variables
# from python-for-android project
_platform_android = 'ANDROID_ARGUMENT' in environ
if _platform_ios is None:
_platform_ios = (environ.get('KIVY_BUILD', '') == 'ios')
# On android, _sys_platform returns 'linux2', so prefer the ANDROID_ARGUMENT
# environment check above rather than relying on _sys_platform.
if _platform_android is True:
return 'android'
elif _platform_ios is True:
return 'ios'
elif _sys_platform in ('win32', 'cygwin'):
return 'win'
elif _sys_platform == 'darwin':
return 'macosx'
elif _sys_platform[:5] == 'linux':
return 'linux'
return 'unknown'
platform = Platform()
'''
.. versionadded:: 1.3.0
Deprecated since 1.8.0: Use platform as a variable instead of a function.\n
Calling platform() will return one of: *win*, *linux*, *android*, *macosx*,
*ios*, or *unknown*.
.. versionchanged:: 1.8.0
`platform` also behaves like a regular variable in comparisons like so::
from kivy import platform
if platform == 'linux':
do_linux_things()
if platform() == 'linux': # triggers deprecation warning
do_more_linux_things()
foo = {'linux' : do_linux_things}
foo[platform]() # calls do_linux_things
p = platform # assigns to a module object
if p is 'android':
do_android_things()
p += 'some string' # error!
'''
def escape_markup(text):
'''
Escape markup characters found in the text. Intended to be used when markup
text is activated on the Label::
untrusted_text = escape_markup('Look at the example [1]')
text = '[color=ff0000]' + untrusted_text + '[/color]'
w = Label(text=text, markup=True)
.. versionadded:: 1.3.0
'''
return text.replace('&', '&amp;').replace('[', '&bl;').replace(']', '&br;')
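# Illustrative sketch (not executed): ampersands are escaped first so that the
# entities produced for '[' and ']' are left intact, e.g.
#
#   >>> escape_markup('[b]5 & 6[/b]')
#   '&bl;b&br;5 &amp; 6&bl;/b&br;'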
class reify(object):
'''
Put the result of a method which uses this (non-data) descriptor decorator
in the instance dict after the first call, effectively replacing the
decorator with an instance variable.
It acts like @property, except that the function is only ever called once;
after that, the value is cached as a regular attribute. This gives you lazy
attribute creation on objects that are meant to be immutable.
Taken from Pyramid project.
'''
def __init__(self, func):
self.func = func
self.__doc__ = func.__doc__
def __get__(self, inst, cls):
if inst is None:
return self
retval = self.func(inst)
setattr(inst, self.func.__name__, retval)
return retval
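# Illustrative sketch (not executed): the decorated method runs once per
# instance and its result then shadows the descriptor, e.g.
#
#   class Widget(object):
#       @reify
#       def texture(self):
#           return load_texture(self.source)   # expensive, runs only once
#
# `Widget`, `load_texture` and `source` are hypothetical names for this example;
# after the first access, `widget.texture` is a plain instance attribute.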
|
|
# -*- coding: utf-8 -*-
import logging
import sys
import argparse
import sqlalchemy.exc
import gmalthgtparser as hgt
import gmaltcli.tools as tools
import gmaltcli.worker as worker
import gmaltcli.database as database
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
def create_read_from_hgt_parser():
""" CLI parser for gmalt-hgtread
:return: cli parser
:rtype: :class:`argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(description='Pass along the latitude/longitude of the point you want to '
'know the elevation of and a HGT file. It will look for the '
'elevation of your point in the file and return it.')
parser.add_argument('lat', type=float, help='The latitude of your point (example: 48.861295)')
parser.add_argument('lng', type=float, help='The longitude of your point (example: 2.339703)')
parser.add_argument('hgt_file', type=str, help='The file to load (example: N00E010.hgt)')
return parser
def read_from_hgt():
""" Function called by the console_script `gmalt-hgtread`
Usage:
gmalt-hgtread <lat> <lng> <path to hgt file>
Print on stdout :
Report:
Location: (408P,166L)
Band 1:
Value: 644
"""
parser = create_read_from_hgt_parser()
args = parser.parse_args()
try:
with hgt.HgtParser(args.hgt_file) as hgt_parser:
elev_data = hgt_parser.get_elevation((args.lat, args.lng))
except Exception as e:
logging.error(str(e))
return sys.exit(1)
sys.stdout.write('Report:\n')
sys.stdout.write(' Location: ({}P,{}L)\n'.format(elev_data[1], elev_data[0]))
sys.stdout.write(' Band 1:\n')
sys.stdout.write(' Value: {}\n'.format(elev_data[2]))
return sys.exit(0)
def create_get_hgt_parser():
""" CLI parser for gmalt-hgtget
:return: cli parser
:rtype: :class:`argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(description='Download and unzip HGT files from a remote source')
parser.add_argument('dataset', type=tools.dataset_file, action=tools.LoadDatasetAction,
help='A dataset file provided by this package or the path to your own dataset file. Please '
'read documentation to get dataset JSON format')
parser.add_argument('folder', type=tools.writable_folder,
help='Path to the folder where the HGT zips will be downloaded or where they have '
'already been downloaded.')
parser.add_argument('--skip-download', dest='skip_download', action='store_true',
help='Set this flag if you don\'t want to download the zip files.')
parser.add_argument('--skip-unzip', dest='skip_unzip', action='store_true',
help='Set this flag if you don\'t want to unzip the HGT zip files')
parser.add_argument('-c', type=int, dest='concurrency', default=1,
help='How many workers will attempt to download or unzip files in parallel')
parser.add_argument('-v', dest='verbose', action='store_true', help='increase verbosity level')
return parser
def get_hgt():
""" Function called by the console_script `gmalt-hgtget`
Usage:
gmalt-hgtget [options] <dataset> <folder>
"""
# Parse command line arguments
parser = create_get_hgt_parser()
args = parser.parse_args()
tools.configure_logging(args.verbose)
logging.info('config - dataset file : %s' % args.dataset)
logging.info('config - parallelism : %i' % args.concurrency)
logging.info('config - folder : %s' % args.folder)
try:
# Download HGT zip file in a pool of thread
tools.download_hgt_zip_files(args.folder, args.dataset_files, args.concurrency,
skip=args.skip_download)
# Unzip in folder all HGT zip files found in folder
tools.extract_hgt_zip_files(args.folder, args.concurrency, skip=args.skip_unzip)
except KeyboardInterrupt:
pass
except worker.WorkerPoolException:
# in case of ThreadPoolException, the worker which raised the error
# logs it using logging.exception
return sys.exit(1)
except Exception as exception:
logging.exception(exception)
return sys.exit(1)
return sys.exit(0)
def create_load_hgt_parser():
""" CLI parser for gmalt-hgtload
:return: cli parser
:rtype: :class:`argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(description='Read HGT files and import elevation values into a database')
parser.add_argument('folder', type=tools.existing_folder,
help='Path to the folder where the HGT files are stored.')
parser.add_argument('-c', type=int, dest='concurrency', default=1,
help='How many workers will attempt to load files in parallel')
parser.add_argument('-v', dest='verbose', action='store_true', help='increase verbosity level')
parser.add_argument('-tb', '--traceback', dest='traceback', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('-e', '--echo', dest='echo', action='store_true', help=argparse.SUPPRESS)
# Database connection args
db_group = parser.add_argument_group('database', 'database connection configuration')
db_group.add_argument('--type', type=str, dest='type', default="postgres",
help='The type of your database (default : postgres)')
db_group.add_argument('-H', '--host', type=str, dest='host', default="localhost",
help='The hostname of the database')
db_group.add_argument('-P', '--port', type=int, dest='port', help='The port of the database')
db_group.add_argument('-d', '--db', type=str, dest='database', default="gmalt", help='The name of the database')
db_group.add_argument('-u', '--user', type=str, dest='username', required=True,
help='The user to connect to the database')
db_group.add_argument('-p', '--pass', type=str, dest='password', help='The password to connect to the database')
db_group.add_argument('-t', '--table', type=str, dest='table', default="elevation",
help='The table name to import data')
# Raster configuration
gis_group = parser.add_argument_group('gis', 'GIS configuration')
gis_group.add_argument('-r', '--raster', dest='use_raster', action='store_true',
help='Use raster to import data. Your database must have GIS capabilities '
'like PostGIS for PostgreSQL.')
gis_group.add_argument('-s', '--sample', nargs=2, type=int, dest='sample', metavar=('LNG_SAMPLE', 'LAT_SAMPLE'),
default=(None, None), help="Separate a HGT file in multiple rasters. Sample on lng axis "
"and lat axis.")
gis_group.add_argument('--skip-raster2pgsql-check', dest='check_raster2pgsql', default=True, action='store_false',
help='Skip raster2pgsql presence check')
return parser
def load_hgt():
""" Function called by the console_script `gmalt-hgtload`
Usage:
gmalt-hgtload [options] -u <user> <folder>
"""
# Parse command line arguments
parser = create_load_hgt_parser()
args = vars(parser.parse_args())
# logging
traceback = args.pop('traceback')
tools.configure_logging(args.pop('verbose'), echo=args.pop('echo'))
# Pop everything not related to database uri string
concurrency = args.pop('concurrency')
folder = args.pop('folder')
use_raster = args.pop('use_raster')
samples = args.pop('sample')
db_driver = args.pop('type')
table_name = args.pop('table')
check_raster2pgsql = args.pop('check_raster2pgsql')
# sqlalchemy.engine.url.URL args
db_info = args
# If postgres driver and raster2pgsql is available, propose to use this solution instead.
if db_driver == 'postgres' and use_raster and check_raster2pgsql and tools.check_for_raster2pgsql():
sys.exit(0)
logging.info('config - parallelism : %i' % concurrency)
logging.info('config - folder : %s' % folder)
logging.info('config - db driver : %s' % db_driver)
logging.info('config - db host : %s' % db_info.get('host'))
logging.info('config - db user : %s' % db_info.get('username'))
logging.info('config - db name : %s' % db_info.get('database'))
logging.info('config - db table : %s' % table_name)
if use_raster:
logging.debug('config - use raster : %s' % use_raster)
logging.debug('config - raster sampling : {}'.format('{}x{}'.format(*samples) if samples[0] else 'none'))
# create sqlalchemy engine
factory = database.ManagerFactory(db_driver, table_name, pool_size=concurrency, **db_info)
try:
# First validate that the database is ready
with factory.get_manager(use_raster) as manager:
manager.prepare_environment()
# Then process HGT files
tools.import_hgt_zip_files(folder, concurrency, factory, use_raster, samples)
except sqlalchemy.exc.OperationalError:
logging.error('Unable to connect to database with these settings : {}'.format(factory.engine.url),
exc_info=traceback)
except database.NotSupportedException:
logging.error('Database does not support raster settings. Have you enabled GIS extension ?', exc_info=traceback)
return sys.exit(1)
except KeyboardInterrupt:
return sys.exit(0)
except worker.WorkerPoolException:
# in case of ThreadPoolException, the worker which raised the error
# logs it using logging.exception
return sys.exit(1)
except Exception as e:
logging.error('Unknown error : {}'.format(str(e)), exc_info=traceback)
return sys.exit(1)
return sys.exit(0)
|
|
from .solverwrapper import SolverWrapper
import numpy as np
from ..constraint import ConstraintType
from ..constants import QPOASES_INFTY, TINY, SMALL
from ..exceptions import SolverNotFound
try:
from qpoases import (
PyOptions as Options,
PyPrintLevel as PrintLevel,
PyReturnValue as ReturnValue,
PySQProblem as SQProblem,
)
qpoases_FOUND = True
except ImportError:
qpoases_FOUND = False
import logging
logger = logging.getLogger(__name__)
class hotqpOASESSolverWrapper(SolverWrapper):
"""`qpOASES` solver wrapper with hot-start.
This wrapper takes advantage of the warm-start capability of the
qpOASES quadratic programming solver. It uses two different
QP solvers: one to solve for maximized controllable sets and one to
solve for minimized controllable sets. The wrapper selects which solver
to use by looking at the optimization direction.
This solver wrapper also scales data before invoking `qpOASES`.
If the logger "toppra" is set to debug level, qpoases solvers are
initialized with PrintLevel.HIGH. Otherwise, these are initialized
with PrintLevel.NONE
Currently only supports Canonical Linear Constraints.
Parameters
----------
constraint_list: :class:`.Constraint` []
The constraints the robot is subjected to.
path: :class:`.Interpolator`
The geometric path.
path_discretization: array
The discretized path positions.
disable_check: bool, optional
Disable check for solution validity. Improve speed by about
20% but entails the possibility that failure is not reported
correctly.
scaling_solverwrapper: bool, optional
If True, try to scale the data of each optimization before
running. Important: currently scaling is always done
regardless of the value of this variable. To be fixed.
"""
def __init__(
self,
constraint_list,
path,
path_discretization,
disable_check=False,
scaling_solverwrapper=True,
):
if not qpoases_FOUND:
SolverNotFound("toppra is unable to find any installation of qpoases!")
super(hotqpOASESSolverWrapper, self).__init__(
constraint_list, path, path_discretization
)
self._disable_check = disable_check
# First constraint is x + 2 D u <= xnext_max, second is xnext_min <= x + 2D u
self.nC = 2 # number of Constraints.
for i, constraint in enumerate(constraint_list):
if constraint.get_constraint_type() != ConstraintType.CanonicalLinear:
raise NotImplementedError
a, b, c, F, v, _, _ = self.params[i]
if a is not None:
if constraint.identical:
self.nC += F.shape[0]
else:
self.nC += F.shape[1]
# qpOASES coefficient arrays
# l <= var <= h
# lA <= A var <= hA
self._A = np.zeros((self.nC, self.nV))
self._lA = -np.ones(self.nC) * QPOASES_INFTY
self._hA = np.ones(self.nC) * QPOASES_INFTY
self._l = -np.ones(2) * QPOASES_INFTY
self._h = np.ones(2) * QPOASES_INFTY
def setup_solver(self):
"""Initiate two internal solvers for warm-start.
"""
option = Options()
if logger.getEffectiveLevel() == logging.DEBUG:
# option.printLevel = PrintLevel.HIGH
option.printLevel = PrintLevel.NONE
else:
option.printLevel = PrintLevel.NONE
self.solver_minimizing = SQProblem(self.nV, self.nC)
self.solver_minimizing.setOptions(option)
self.solver_maximizing = SQProblem(self.nV, self.nC)
self.solver_maximizing.setOptions(option)
self.solver_minimizing_recent_index = -2
self.solver_maximizing_recent_index = -2
def close_solver(self):
self.solver_minimizing = None
self.solver_maximizing = None
def solve_stagewise_optim(self, i, H, g, x_min, x_max, x_next_min, x_next_max):
assert i <= self.N and 0 <= i
# solve the scaled optimization problem
# min 0.5 y^T scale H scale y + g^T scale y
# s.t lA <= A scale y <= hA
# l <= scale y <= h
self._l[:] = -QPOASES_INFTY
self._h[:] = QPOASES_INFTY
if x_min is not None:
self._l[1] = max(self._l[1], x_min)
if x_max is not None:
self._h[1] = min(self._h[1], x_max)
if i < self.N:
delta = self.get_deltas()[i]
if x_next_min is not None:
self._A[0] = [-2 * delta, -1]
self._hA[0] = -x_next_min
else:
self._A[0] = [0, 0]
self._hA[0] = QPOASES_INFTY
self._lA[0] = -QPOASES_INFTY
if x_next_max is not None:
self._A[1] = [2 * delta, 1]
self._hA[1] = x_next_max
else:
self._A[1] = [0, 0]
self._hA[1] = QPOASES_INFTY
self._lA[1] = -QPOASES_INFTY
cur_index = 2
for j in range(len(self.constraints)):
a, b, c, F, v, ubound, xbound = self.params[j]
if a is not None:
if self.constraints[j].identical:
nC_ = F.shape[0]
self._A[cur_index : cur_index + nC_, 0] = F.dot(a[i])
self._A[cur_index : cur_index + nC_, 1] = F.dot(b[i])
self._hA[cur_index : cur_index + nC_] = v - F.dot(c[i])
self._lA[cur_index : cur_index + nC_] = -QPOASES_INFTY
else:
nC_ = F[i].shape[0]
self._A[cur_index : cur_index + nC_, 0] = F[i].dot(a[i])
self._A[cur_index : cur_index + nC_, 1] = F[i].dot(b[i])
self._hA[cur_index : cur_index + nC_] = v[i] - F[i].dot(c[i])
self._lA[cur_index : cur_index + nC_] = -QPOASES_INFTY
cur_index = cur_index + nC_
if ubound is not None:
self._l[0] = max(self._l[0], ubound[i, 0])
self._h[0] = min(self._h[0], ubound[i, 1])
if xbound is not None:
self._l[1] = max(self._l[1], xbound[i, 0])
self._h[1] = min(self._h[1], xbound[i, 1])
# if x_min == x_max, do not solve the 2D linear program, instead, do a line search
if abs(x_min - x_max) < TINY and H is None and self.get_no_vars() == 2:
logger.debug("x_min ({:f}) equals x_max ({:f})".format(x_min, x_max))
u_min = -QPOASES_INFTY
u_max = QPOASES_INFTY
for i in range(self._A.shape[0]):
if self._A[i, 0] > 0:
u_max = min(
u_max, (self._hA[i] - self._A[i, 1] * x_min) / self._A[i, 0]
)
elif self._A[i, 0] < 0:
u_min = max(
u_min, (self._hA[i] - self._A[i, 1] * x_min) / self._A[i, 0]
)
if (u_min - u_max) / abs(u_max) > SMALL: # problem infeasible
logger.debug(
"u_min > u_max by {:f}. Might not be critical. "
"Returning failure.".format(u_min - u_max)
)
return np.array([np.nan, np.nan])
if g[0] < 0:
return np.array([u_max, x_min + 2 * u_max * delta])
else:
return np.array([u_min, x_min + 2 * u_min * delta])
if H is None:
H = (
np.ones((self.get_no_vars(), self.get_no_vars())) * 1e-18
) # regularization, very important
ratio_col1 = 1 / (
np.sum(np.abs(self._A[2:, 0])) + 1e-5
) # the maximum possible value for both ratios is 100000
ratio_col2 = 1 / (np.sum(np.abs(self._A[2:, 1])) + 1e-5)
variable_scales = np.array([ratio_col1, ratio_col2])
# variable_scales = np.array([5000.0, 2000.0])
variable_scales_mat = np.diag(variable_scales)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"min ratio col 1 {:f}, col 2 {:f}".format(ratio_col1, ratio_col2)
)
# ratio scaling
self._A = self._A.dot(variable_scales_mat)
self._l = self._l / variable_scales
self._h = self._h / variable_scales
self._g = g * variable_scales
self._H = variable_scales_mat.dot(H).dot(variable_scales_mat)
# rows scaling
row_magnitude = np.sum(np.abs(self._A), axis=1)
row_scaling_mat = np.diag((row_magnitude + 1) ** (-1))
self._A = np.dot(row_scaling_mat, self._A)
self._lA = np.dot(row_scaling_mat, self._lA)
self._hA = np.dot(row_scaling_mat, self._hA)
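# NOTE: the QP below is now expressed in scaled variables y, where the
# original (u, x) equal variable_scales * y and the constraint rows have been
# normalized; the primal solution returned by qpOASES is therefore multiplied
# back by variable_scales before being returned (see `var * variable_scales`).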
return_value, var = self._solve_optimization(i)
if return_value == ReturnValue.SUCCESSFUL_RETURN:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("optimal value: {:}".format(var))
if self._disable_check:
return var * variable_scales
# Check for constraint feasibility
success = (
np.all(self._l <= var + TINY)
and np.all(var <= self._h + TINY)
and np.all(np.dot(self._A, var) <= self._hA + TINY)
and np.all(np.dot(self._A, var) >= self._lA - TINY)
)
if not success:
# import ipdb; ipdb.set_trace()
logger.fatal(
"Hotstart fails but qpOASES does not report correctly. \n "
"var: {:}, lower_bound: {:}, higher_bound{:}".format(
var, self._l, self._h
)
)
# TODO: Investigate why this happen and fix the
# relevant code (in qpOASES wrapper)
else:
return var * variable_scales
else:
logger.debug("Optimization fails. qpOASES error code: %d.", return_value)
if (
np.all(0 <= self._hA)
and np.all(0 >= self._lA)
and np.all(0 <= self._h)
and np.all(0 >= self._l)
):
logger.fatal(
"(0, 0) satisfies all constraints => error due to numerical errors.",
self._A,
self._lA,
self._hA,
self._l,
self._h,
)
else:
logger.debug("(0, 0) does not satisfy all constraints.")
return_value = np.empty(self.get_no_vars())
return_value[:] = np.nan
return return_value
def _solve_optimization(self, i):
var = np.zeros(self.nV)
if self._g[1] > 0: # Choose solver_minimizing
if abs(self.solver_minimizing_recent_index - i) > 1:
logger.debug("solver_minimizing [init]")
return_value = self.solver_minimizing.init(
self._H,
self._g,
self._A,
self._l,
self._h,
self._lA,
self._hA,
np.array([1000]),
)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("solver_minimizing [hotstart]")
return_value = self.solver_minimizing.hotstart(
self._H,
self._g,
self._A,
self._l,
self._h,
self._lA,
self._hA,
np.array([1000]),
)
self.solver_minimizing_recent_index = i
self.solver_minimizing.getPrimalSolution(var)
else: # Choose solver_maximizing
if abs(self.solver_maximizing_recent_index - i) > 1:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("solver_maximizing [init]")
return_value = self.solver_maximizing.init(
self._H,
self._g,
self._A,
self._l,
self._h,
self._lA,
self._hA,
np.array([1000]),
)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug("solver_maximizing [hotstart]")
return_value = self.solver_maximizing.hotstart(
self._H,
self._g,
self._A,
self._l,
self._h,
self._lA,
self._hA,
np.array([1000]),
)
self.solver_maximizing_recent_index = i
self.solver_maximizing.getPrimalSolution(var)
return return_value, var
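# Illustrative sketch (not executed, assumptions flagged): this wrapper is
# normally driven by a higher-level TOPP-RA algorithm rather than used
# directly. `constraint_list`, `path`, `gridpoints`, `x_next_min` and
# `x_next_max` are placeholders for objects built elsewhere; the method names
# mirror those defined in this class.
#
#   solver = hotqpOASESSolverWrapper(constraint_list, path, gridpoints)
#   solver.setup_solver()
#   u_x = solver.solve_stagewise_optim(0, None, np.array([0.0, -1.0]),
#                                      None, None, x_next_min, x_next_max)
#   solver.close_solver()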
|
|
# -*- coding: utf-8-*-
"""
A Speaker handles audio output from Jasper to the user
Speaker methods:
say - output 'phrase' as speech
play - play the audio in 'filename'
is_available - returns True if the platform supports this implementation
"""
import os
import platform
import re
import tempfile
import subprocess
import pipes
import logging
import wave
import urllib
import urlparse
import requests
from abc import ABCMeta, abstractmethod
import argparse
import yaml
try:
import mad
import gtts
except ImportError:
pass
import diagnose
import jasperpath
class AbstractTTSEngine(object):
"""
Generic parent class for all speakers
"""
__metaclass__ = ABCMeta
@classmethod
def get_config(cls):
return {}
@classmethod
def get_instance(cls):
config = cls.get_config()
instance = cls(**config)
return instance
@classmethod
@abstractmethod
def is_available(cls):
return diagnose.check_executable('aplay')
def __init__(self, **kwargs):
self._logger = logging.getLogger(__name__)
@abstractmethod
def say(self, phrase, *args):
pass
def play(self, filename):
# FIXME: Use platform-independent audio-output here
# See issue jasperproject/jasper-client#188
cmd = ['aplay', '-D', 'plughw:1,0', str(filename)]
self._logger.debug('Executing %s', ' '.join([pipes.quote(arg)
for arg in cmd]))
with tempfile.TemporaryFile() as f:
subprocess.call(cmd, stdout=f, stderr=f)
f.seek(0)
output = f.read()
if output:
self._logger.debug("Output was: '%s'", output)
class AbstractMp3TTSEngine(AbstractTTSEngine):
"""
Generic class that implements the 'play' method for mp3 files
"""
@classmethod
def is_available(cls):
return (super(AbstractMp3TTSEngine, cls).is_available() and
diagnose.check_python_import('mad'))
def play_mp3(self, filename):
mf = mad.MadFile(filename)
with tempfile.NamedTemporaryFile(suffix='.wav') as f:
wav = wave.open(f, mode='wb')
wav.setframerate(mf.samplerate())
wav.setnchannels(1 if mf.mode() == mad.MODE_SINGLE_CHANNEL else 2)
# 4L is the sample width of 32 bit audio
wav.setsampwidth(4L)
frame = mf.read()
while frame is not None:
wav.writeframes(frame)
frame = mf.read()
wav.close()
self.play(f.name)
class DummyTTS(AbstractTTSEngine):
"""
Dummy TTS engine that logs phrases with INFO level instead of synthesizing
speech.
"""
SLUG = "dummy-tts"
@classmethod
def is_available(cls):
return True
def say(self, phrase):
self._logger.info(phrase)
def play(self, filename):
self._logger.debug("Playback of file '%s' requested")
pass
class EspeakTTS(AbstractTTSEngine):
"""
Uses the eSpeak speech synthesizer included in the Jasper disk image
Requires espeak to be available
"""
SLUG = "espeak-tts"
def __init__(self, voice='default+m3', pitch_adjustment=40,
words_per_minute=160):
super(self.__class__, self).__init__()
self.voice = voice
self.pitch_adjustment = pitch_adjustment
self.words_per_minute = words_per_minute
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# HMM dir
# Try to get hmm_dir from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'espeak-tts' in profile:
if 'voice' in profile['espeak-tts']:
config['voice'] = profile['espeak-tts']['voice']
if 'pitch_adjustment' in profile['espeak-tts']:
config['pitch_adjustment'] = \
profile['espeak-tts']['pitch_adjustment']
if 'words_per_minute' in profile['espeak-tts']:
config['words_per_minute'] = \
profile['espeak-tts']['words_per_minute']
return config
@classmethod
def is_available(cls):
return (super(cls, cls).is_available() and
diagnose.check_executable('espeak'))
def say(self, phrase):
self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
fname = f.name
cmd = ['espeak', '-v', self.voice,
'-p', self.pitch_adjustment,
'-s', self.words_per_minute,
'-w', fname,
phrase]
cmd = [str(x) for x in cmd]
self._logger.debug('Executing %s', ' '.join([pipes.quote(arg)
for arg in cmd]))
with tempfile.TemporaryFile() as f:
subprocess.call(cmd, stdout=f, stderr=f)
f.seek(0)
output = f.read()
if output:
self._logger.debug("Output was: '%s'", output)
self.play(fname)
os.remove(fname)
class FestivalTTS(AbstractTTSEngine):
"""
Uses the festival speech synthesizer
Requires festival (text2wave) to be available
"""
SLUG = 'festival-tts'
@classmethod
def is_available(cls):
if (super(cls, cls).is_available() and
diagnose.check_executable('text2wave') and
diagnose.check_executable('festival')):
logger = logging.getLogger(__name__)
cmd = ['festival', '--pipe']
with tempfile.SpooledTemporaryFile() as out_f:
with tempfile.SpooledTemporaryFile() as in_f:
logger.debug('Executing %s', ' '.join([pipes.quote(arg)
for arg in cmd]))
subprocess.call(cmd, stdin=in_f, stdout=out_f,
stderr=out_f)
out_f.seek(0)
output = out_f.read().strip()
if output:
logger.debug("Output was: '%s'", output)
return ('No default voice found' not in output)
return False
def say(self, phrase):
self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
cmd = ['text2wave']
with tempfile.NamedTemporaryFile(suffix='.wav') as out_f:
with tempfile.SpooledTemporaryFile() as in_f:
in_f.write(phrase)
in_f.seek(0)
with tempfile.SpooledTemporaryFile() as err_f:
self._logger.debug('Executing %s',
' '.join([pipes.quote(arg)
for arg in cmd]))
subprocess.call(cmd, stdin=in_f, stdout=out_f,
stderr=err_f)
err_f.seek(0)
output = err_f.read()
if output:
self._logger.debug("Output was: '%s'", output)
self.play(out_f.name)
class FliteTTS(AbstractTTSEngine):
"""
Uses the flite speech synthesizer
Requires flite to be available
"""
SLUG = 'flite-tts'
def __init__(self, voice=''):
super(self.__class__, self).__init__()
self.voice = voice if voice and voice in self.get_voices() else ''
@classmethod
def get_voices(cls):
cmd = ['flite', '-lv']
voices = []
with tempfile.SpooledTemporaryFile() as out_f:
subprocess.call(cmd, stdout=out_f)
out_f.seek(0)
for line in out_f:
if line.startswith('Voices available: '):
voices.extend([x.strip() for x in line[18:].split()
if x.strip()])
return voices
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# HMM dir
# Try to get hmm_dir from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'flite-tts' in profile:
if 'voice' in profile['flite-tts']:
config['voice'] = profile['flite-tts']['voice']
return config
@classmethod
def is_available(cls):
return (super(cls, cls).is_available() and
diagnose.check_executable('flite') and
len(cls.get_voices()) > 0)
def say(self, phrase):
self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
cmd = ['flite']
if self.voice:
cmd.extend(['-voice', self.voice])
cmd.extend(['-t', phrase])
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
fname = f.name
cmd.append(fname)
with tempfile.SpooledTemporaryFile() as out_f:
self._logger.debug('Executing %s',
' '.join([pipes.quote(arg)
for arg in cmd]))
subprocess.call(cmd, stdout=out_f, stderr=out_f)
out_f.seek(0)
output = out_f.read().strip()
if output:
self._logger.debug("Output was: '%s'", output)
self.play(fname)
os.remove(fname)
class MacOSXTTS(AbstractTTSEngine):
"""
Uses the OS X built-in 'say' command
"""
SLUG = "osx-tts"
@classmethod
def is_available(cls):
return (platform.system().lower() == 'darwin' and
diagnose.check_executable('say') and
diagnose.check_executable('afplay'))
def say(self, phrase):
self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
cmd = ['say', str(phrase)]
self._logger.debug('Executing %s', ' '.join([pipes.quote(arg)
for arg in cmd]))
with tempfile.TemporaryFile() as f:
subprocess.call(cmd, stdout=f, stderr=f)
f.seek(0)
output = f.read()
if output:
self._logger.debug("Output was: '%s'", output)
def play(self, filename):
cmd = ['afplay', str(filename)]
self._logger.debug('Executing %s', ' '.join([pipes.quote(arg)
for arg in cmd]))
with tempfile.TemporaryFile() as f:
subprocess.call(cmd, stdout=f, stderr=f)
f.seek(0)
output = f.read()
if output:
self._logger.debug("Output was: '%s'", output)
class PicoTTS(AbstractTTSEngine):
"""
Uses the svox-pico-tts speech synthesizer
Requires pico2wave to be available
"""
SLUG = "pico-tts"
def __init__(self, language="en-US"):
super(self.__class__, self).__init__()
self.language = language
@classmethod
def is_available(cls):
return (super(cls, cls).is_available() and
diagnose.check_executable('pico2wave'))
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# HMM dir
# Try to get hmm_dir from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'pico-tts' in profile and 'language' in profile['pico-tts']:
config['language'] = profile['pico-tts']['language']
return config
@property
def languages(self):
cmd = ['pico2wave', '-l', 'NULL',
'-w', os.devnull,
'NULL']
with tempfile.SpooledTemporaryFile() as f:
subprocess.call(cmd, stderr=f)
f.seek(0)
output = f.read()
pattern = re.compile(r'Unknown language: NULL\nValid languages:\n' +
r'((?:[a-z]{2}-[A-Z]{2}\n)+)')
matchobj = pattern.match(output)
if not matchobj:
raise RuntimeError("pico2wave: valid languages not detected")
langs = matchobj.group(1).split()
return langs
def say(self, phrase):
self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
fname = f.name
cmd = ['pico2wave', '--wave', fname]
if self.language not in self.languages:
raise ValueError("Language '%s' not supported by '%s'",
self.language, self.SLUG)
cmd.extend(['-l', self.language])
cmd.append(phrase)
self._logger.debug('Executing %s', ' '.join([pipes.quote(arg)
for arg in cmd]))
with tempfile.TemporaryFile() as f:
subprocess.call(cmd, stdout=f, stderr=f)
f.seek(0)
output = f.read()
if output:
self._logger.debug("Output was: '%s'", output)
self.play(fname)
os.remove(fname)
class GoogleTTS(AbstractMp3TTSEngine):
"""
Uses the Google TTS online translator
Requires pymad and gTTS to be available
"""
SLUG = "google-tts"
def __init__(self, language='en'):
super(self.__class__, self).__init__()
self.language = language
@classmethod
def is_available(cls):
return (super(cls, cls).is_available() and
diagnose.check_python_import('gtts') and
diagnose.check_network_connection())
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# HMM dir
# Try to get hmm_dir from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if ('google-tts' in profile and
'language' in profile['google-tts']):
config['language'] = profile['google-tts']['language']
return config
@property
def languages(self):
langs = ['af', 'sq', 'ar', 'hy', 'ca', 'zh-CN', 'zh-TW', 'hr', 'cs',
'da', 'nl', 'en', 'eo', 'fi', 'fr', 'de', 'el', 'ht', 'hi',
'hu', 'is', 'id', 'it', 'ja', 'ko', 'la', 'lv', 'mk', 'no',
'pl', 'pt', 'ro', 'ru', 'sr', 'sk', 'es', 'sw', 'sv', 'ta',
'th', 'tr', 'vi', 'cy']
return langs
def say(self, phrase):
self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
if self.language not in self.languages:
raise ValueError("Language '%s' not supported by '%s'",
self.language, self.SLUG)
tts = gtts.gTTS(text=phrase, lang=self.language)
with tempfile.NamedTemporaryFile(suffix='.mp3', delete=False) as f:
tmpfile = f.name
tts.save(tmpfile)
self.play_mp3(tmpfile)
os.remove(tmpfile)
class MaryTTS(AbstractTTSEngine):
"""
Uses the MARY Text-to-Speech System (MaryTTS)
MaryTTS is an open-source, multilingual Text-to-Speech Synthesis platform
written in Java.
Please specify your own server instead of using the demonstration server
(http://mary.dfki.de:59125/) to save bandwidth and to protect your privacy.
"""
SLUG = "mary-tts"
def __init__(self, server="mary.dfki.de", port="59125", language="en_GB",
voice="dfki-spike"):
super(self.__class__, self).__init__()
self.server = server
self.port = port
self.netloc = '{server}:{port}'.format(server=self.server,
port=self.port)
self.language = language
self.voice = voice
self.session = requests.Session()
@property
def languages(self):
try:
r = self.session.get(self._makeurl('/locales'))
r.raise_for_status()
except requests.exceptions.RequestException:
self._logger.critical("Communication with MaryTTS server at %s " +
"failed.", self.netloc)
raise
return r.text.splitlines()
@property
def voices(self):
r = self.session.get(self._makeurl('/voices'))
r.raise_for_status()
return [line.split()[0] for line in r.text.splitlines()]
@classmethod
def get_config(cls):
# FIXME: Replace this as soon as we have a config module
config = {}
# HMM dir
# Try to get hmm_dir from config
profile_path = jasperpath.config('profile.yml')
if os.path.exists(profile_path):
with open(profile_path, 'r') as f:
profile = yaml.safe_load(f)
if 'mary-tts' in profile:
if 'server' in profile['mary-tts']:
config['server'] = profile['mary-tts']['server']
if 'port' in profile['mary-tts']:
config['port'] = profile['mary-tts']['port']
if 'language' in profile['mary-tts']:
config['language'] = profile['mary-tts']['language']
if 'voice' in profile['mary-tts']:
config['voice'] = profile['mary-tts']['voice']
return config
@classmethod
def is_available(cls):
return (super(cls, cls).is_available() and
diagnose.check_network_connection())
def _makeurl(self, path, query={}):
query_s = urllib.urlencode(query)
urlparts = ('http', self.netloc, path, query_s, '')
return urlparse.urlunsplit(urlparts)
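    # Illustrative note (not part of the original module): with the default
    # server and port, _makeurl('/process', {'INPUT_TEXT': 'hello'}) builds a
    # URL of the form
    #     http://mary.dfki.de:59125/process?INPUT_TEXT=hello
    # say() below fills in the remaining MaryTTS query parameters
    # (OUTPUT_TYPE, AUDIO, INPUT_TYPE, LOCALE, VOICE) the same way; the
    # parameter order in the query string may vary.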
def say(self, phrase):
self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
if self.language not in self.languages:
raise ValueError("Language '%s' not supported by '%s'"
% (self.language, self.SLUG))
if self.voice not in self.voices:
raise ValueError("Voice '%s' not supported by '%s'"
% (self.voice, self.SLUG))
query = {'OUTPUT_TYPE': 'AUDIO',
'AUDIO': 'WAVE_FILE',
'INPUT_TYPE': 'TEXT',
'INPUT_TEXT': phrase,
'LOCALE': self.language,
'VOICE': self.voice}
r = self.session.get(self._makeurl('/process', query=query))
with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
f.write(r.content)
tmpfile = f.name
self.play(tmpfile)
os.remove(tmpfile)
def get_default_engine_slug():
return 'osx-tts' if platform.system().lower() == 'darwin' else 'espeak-tts'
def get_engine_by_slug(slug=None):
"""
Returns:
A speaker implementation available on the current platform
Raises:
ValueError if no speaker implementation is supported on this platform
"""
if not slug or type(slug) is not str:
raise TypeError("Invalid slug '%s'", slug)
selected_engines = filter(lambda engine: hasattr(engine, "SLUG") and
engine.SLUG == slug, get_engines())
if len(selected_engines) == 0:
raise ValueError("No TTS engine found for slug '%s'" % slug)
else:
if len(selected_engines) > 1:
print("WARNING: Multiple TTS engines found for slug '%s'. " +
"This is most certainly a bug." % slug)
engine = selected_engines[0]
if not engine.is_available():
raise ValueError(("TTS engine '%s' is not available (due to " +
"missing dependencies, etc.)") % slug)
return engine
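# Illustrative usage sketch (not part of the original module): how a caller
# might pick a TTS engine class and speak a phrase. The helper name and the
# fallback to get_default_engine_slug() are hypothetical; any slug exposed by
# get_engines() works the same way.
def _example_speak(phrase, slug=None):
    "Hypothetical helper showing the intended engine-selection flow."
    engine_class = get_engine_by_slug(slug or get_default_engine_slug())
    speaker = engine_class.get_instance()
    speaker.say(phrase)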
def get_engines():
def get_subclasses(cls):
subclasses = set()
for subclass in cls.__subclasses__():
subclasses.add(subclass)
subclasses.update(get_subclasses(subclass))
return subclasses
return [tts_engine for tts_engine in
list(get_subclasses(AbstractTTSEngine))
if hasattr(tts_engine, 'SLUG') and tts_engine.SLUG]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Jasper TTS module')
parser.add_argument('--debug', action='store_true',
help='Show debug messages')
args = parser.parse_args()
logging.basicConfig()
if args.debug:
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
engines = get_engines()
available_engines = []
for engine in get_engines():
if engine.is_available():
available_engines.append(engine)
disabled_engines = list(set(engines).difference(set(available_engines)))
print("Available TTS engines:")
for i, engine in enumerate(available_engines, start=1):
print("%d. %s" % (i, engine.SLUG))
print("")
print("Disabled TTS engines:")
for i, engine in enumerate(disabled_engines, start=1):
print("%d. %s" % (i, engine.SLUG))
print("")
for i, engine in enumerate(available_engines, start=1):
print("%d. Testing engine '%s'..." % (i, engine.SLUG))
engine.get_instance().say("This is a test.")
print("Done.")
|
|
"""Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions."""
from __future__ import generators
from utils import *
import math, random, sys, time, bisect, string
#______________________________________________________________________________
class Problem:
"""The abstract class for a formal problem. You should subclass this and
implement the method successor, and possibly __init__, goal_test, and
path_cost. Then you will create instances of your subclass and solve them
with the various search functions."""
def __init__(self, initial, goal=None):
"""The constructor specifies the initial state, and possibly a goal
state, if there is a unique goal. Your subclass's constructor can add
other arguments."""
self.initial = initial; self.goal = goal
def successor(self, state):
"""Given a state, return a sequence of (action, state) pairs reachable
from this state. If there are many successors, consider an iterator
that yields the successors one at a time, rather than building them
all at once. Iterators will work fine within the framework."""
abstract
def goal_test(self, state):
"""Return True if the state is a goal. The default method compares the
state to self.goal, as specified in the constructor. Implement this
method if checking against a single self.goal is not enough."""
return state == self.goal
def path_cost(self, c, state1, action, state2):
"""Return the cost of a solution path that arrives at state2 from
state1 via action, assuming cost c to get up to state1. If the problem
is such that the path doesn't matter, this function will only look at
state2. If the path does matter, it will consider c and maybe state1
and action. The default method costs 1 for every step in the path."""
return c + 1
    def value(self, state):
"""For optimization problems, each state has a value. Hill-climbing
and related algorithms try to maximize this value."""
abstract
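# Illustrative sketch (not part of the original module): a tiny Problem
# subclass over integer states, where each action adds or subtracts 1. It only
# exists to show which methods a subclass typically overrides; create it with
# e.g. _CountToGoal(0, 8) and hand it to one of the search functions defined
# below (breadth-first graph search will find the goal).
class _CountToGoal(Problem):
    "Hypothetical example problem: walk from one integer to a goal integer."
    def successor(self, state):
        "Return (action, state) pairs; here the action names the step taken."
        return [('+1', state + 1), ('-1', state - 1)]
    # goal_test and path_cost are inherited: the default goal_test compares
    # the state to self.goal, and the default path_cost charges 1 per step.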
#______________________________________________________________________________
class Node:
"""A node in a search tree. Contains a pointer to the parent (the node
that this is a successor of) and to the actual state for this node. Note
that if a state is arrived at by two paths, then there are two nodes with
the same state. Also includes the action that got us to this state, and
the total path_cost (also known as g) to reach the node. Other functions
may add an f and h value; see best_first_graph_search and astar_search for
an explanation of how the f and h values are handled. You will not need to
subclass this class."""
def __init__(self, state, parent=None, action=None, path_cost=0):
"Create a search tree Node, derived from a parent by an action."
update(self, state=state, parent=parent, action=action,
path_cost=path_cost, depth=0)
if parent:
self.depth = parent.depth + 1
def __repr__(self):
return "<Node %s>" % (self.state,)
def path(self):
"Create a list of nodes from the root to this node."
x, result = self, [self]
while x.parent:
result.append(x.parent)
x = x.parent
return result
def expand(self, problem):
"Return a list of nodes reachable from this node. [Fig. 3.8]"
return [Node(next, self, act,
problem.path_cost(self.path_cost, self.state, act, next))
for (act, next) in problem.successor(self.state)]
#______________________________________________________________________________
## Uninformed Search algorithms
def tree_search(problem, fringe):
"""Search through the successors of a problem to find a goal.
The argument fringe should be an empty queue.
Don't worry about repeated paths to a state. [Fig. 3.8]"""
fringe.append(Node(problem.initial))
while fringe:
node = fringe.pop()
if problem.goal_test(node.state):
return node
fringe.extend(node.expand(problem))
return None
def breadth_first_tree_search(problem):
"Search the shallowest nodes in the search tree first. [p 74]"
return tree_search(problem, FIFOQueue())
def depth_first_tree_search(problem):
"Search the deepest nodes in the search tree first. [p 74]"
return tree_search(problem, Stack())
def graph_search(problem, fringe):
"""Search through the successors of a problem to find a goal.
The argument fringe should be an empty queue.
If two paths reach a state, only use the best one. [Fig. 3.18]"""
closed = {}
fringe.append(Node(problem.initial))
while fringe:
node = fringe.pop()
if problem.goal_test(node.state):
return node
if node.state not in closed:
closed[node.state] = True
fringe.extend(node.expand(problem))
return None
def breadth_first_graph_search(problem):
"Search the shallowest nodes in the search tree first. [p 74]"
return graph_search(problem, FIFOQueue())
def depth_first_graph_search(problem):
"Search the deepest nodes in the search tree first. [p 74]"
return graph_search(problem, Stack())
def depth_limited_search(problem, limit=50):
"[Fig. 3.12]"
def recursive_dls(node, problem, limit):
cutoff_occurred = False
if problem.goal_test(node.state):
return node
elif node.depth == limit:
return 'cutoff'
else:
for successor in node.expand(problem):
result = recursive_dls(successor, problem, limit)
if result == 'cutoff':
cutoff_occurred = True
elif result is not None:
return result
if cutoff_occurred:
return 'cutoff'
else:
return None
# Body of depth_limited_search:
return recursive_dls(Node(problem.initial), problem, limit)
def iterative_deepening_search(problem):
"[Fig. 3.13]"
for depth in xrange(sys.maxint):
result = depth_limited_search(problem, depth)
        if result != 'cutoff':
return result
#______________________________________________________________________________
# Informed (Heuristic) Search
def best_first_graph_search(problem, f):
"""Search the nodes with the lowest f scores first.
You specify the function f(node) that you want to minimize; for example,
if f is a heuristic estimate to the goal, then we have greedy best
first search; if f is node.depth then we have depth-first search.
There is a subtlety: the line "f = memoize(f, 'f')" means that the f
values will be cached on the nodes as they are computed. So after doing
a best first search you can examine the f values of the path returned."""
f = memoize(f, 'f')
return graph_search(problem, PriorityQueue(min, f))
greedy_best_first_graph_search = best_first_graph_search
# Greedy best-first search is accomplished by specifying f(n) = h(n).
def astar_search(problem, h=None):
"""A* search is best-first graph search with f(n) = g(n)+h(n).
You need to specify the h function when you call astar_search.
Uses the pathmax trick: f(n) = max(f(n), g(n)+h(n))."""
h = h or problem.h
def f(n):
return max(getattr(n, 'f', -infinity), n.path_cost + h(n))
return best_first_graph_search(problem, f)
#______________________________________________________________________________
## Other search algorithms
def recursive_best_first_search(problem):
    "[Fig. 4.5]"
    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node, 0  # the second value is immaterial here
        successors = node.expand(problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            s.f = max(s.path_cost + problem.h(s), node.f)
        while True:
            successors.sort(key=lambda s: s.f)  # Order by lowest f value
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            if len(successors) > 1:
                alternative = successors[1].f
            else:
                alternative = infinity
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result, best.f
    root = Node(problem.initial)
    root.f = root.path_cost + problem.h(root)
    result, _ = RBFS(problem, root, infinity)
    return result
def hill_climbing(problem):
    """From the initial node, keep choosing the neighbor with highest value,
    stopping when no neighbor is better. [Fig. 4.11]"""
    current = Node(problem.initial)
    while True:
        neighbor = argmax(current.expand(problem),
                          lambda node: problem.value(node.state))
        if problem.value(neighbor.state) <= problem.value(current.state):
            return current.state
        current = neighbor
def exp_schedule(k=20, lam=0.005, limit=100):
"One possible schedule function for simulated annealing"
return lambda t: if_(t < limit, k * math.exp(-lam * t), 0)
def simulated_annealing(problem, schedule=exp_schedule()):
"[Fig. 4.5]"
current = Node(problem.initial)
for t in xrange(sys.maxint):
T = schedule(t)
if T == 0:
return current
        next = random.choice(current.expand(problem))
delta_e = next.path_cost - current.path_cost
if delta_e > 0 or probability(math.exp(delta_e/T)):
current = next
def online_dfs_agent(a):
"[Fig. 4.12]"
pass #### more
def lrta_star_agent(a):
"[Fig. 4.12]"
pass #### more
#______________________________________________________________________________
# Genetic Algorithm
def genetic_search(problem, fitness_fn, ngen=1000, pmut=0.0, n=20):
"""Call genetic_algorithm on the appropriate parts of a problem.
This requires that the problem has a successor function that generates
reasonable states, and that it has a path_cost function that scores states.
We use the negative of the path_cost function, because costs are to be
minimized, while genetic-algorithm expects a fitness_fn to be maximized."""
    states = [s for (a, s) in problem.successor(problem.initial)[:n]]
random.shuffle(states)
fitness_fn = lambda s: - problem.path_cost(0, s, None, s)
return genetic_algorithm(states, fitness_fn, ngen, pmut)
def genetic_algorithm(population, fitness_fn, ngen=1000, pmut=0.0):
"""[Fig. 4.7]"""
def reproduce(p1, p2):
c = random.randrange(len(p1))
return p1[:c] + p2[c:]
for i in xrange(ngen):
new_population = []
        for i in xrange(len(population)):
            p1, p2 = random_weighted_selection(population, 2, fitness_fn)
child = reproduce(p1, p2)
            if random.uniform(0, 1) < pmut:
                child.mutate()  # assumes individuals expose a mutate() method
new_population.append(child)
population = new_population
return argmax(population, fitness_fn)
def random_weighted_selection(seq, n, weight_fn):
"""Pick n elements of seq, weighted according to weight_fn.
That is, apply weight_fn to each element of seq, add up the total.
Then choose an element e with probability weight[e]/total.
Repeat n times, with replacement. """
totals = []; runningtotal = 0
for item in seq:
runningtotal += weight_fn(item)
totals.append(runningtotal)
selections = []
for s in xrange(n):
r = random.uniform(0, totals[-1])
for i in xrange(len(seq)):
if totals[i] > r:
selections.append(seq[i])
break
return selections
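# Illustrative sketch (not part of the original module): a hypothetical call
# that picks 3 items with replacement, where longer strings are
# proportionally more likely to be chosen (len is the weight function).
def _example_weighted_pick():
    "Hypothetical demo of random_weighted_selection."
    return random_weighted_selection(['a', 'bb', 'cccc'], 3, len)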
#_____________________________________________________________________________
# The remainder of this file implements examples for the search algorithms.
#______________________________________________________________________________
# Graphs and Graph Problems
class Graph:
"""A graph connects nodes (verticies) by edges (links). Each edge can also
have a length associated with it. The constructor call is something like:
g = Graph({'A': {'B': 1, 'C': 2})
this makes a graph with 3 nodes, A, B, and C, with an edge of length 1 from
A to B, and an edge of length 2 from A to C. You can also do:
g = Graph({'A': {'B': 1, 'C': 2}, directed=False)
This makes an undirected graph, so inverse links are also added. The graph
stays undirected; if you add more links with g.connect('B', 'C', 3), then
inverse link is also added. You can use g.nodes() to get a list of nodes,
g.get('A') to get a dict of links out of A, and g.get('A', 'B') to get the
length of the link from A to B. 'Lengths' can actually be any object at
all, and nodes can be any hashable object."""
def __init__(self, dict=None, directed=True):
self.dict = dict or {}
self.directed = directed
if not directed: self.make_undirected()
def make_undirected(self):
"Make a digraph into an undirected graph by adding symmetric edges."
for a in self.dict.keys():
for (b, distance) in self.dict[a].items():
self.connect1(b, a, distance)
def connect(self, A, B, distance=1):
"""Add a link from A and B of given distance, and also add the inverse
link if the graph is undirected."""
self.connect1(A, B, distance)
if not self.directed: self.connect1(B, A, distance)
def connect1(self, A, B, distance):
"Add a link from A to B of given distance, in one direction only."
self.dict.setdefault(A,{})[B] = distance
def get(self, a, b=None):
"""Return a link distance or a dict of {node: distance} entries.
.get(a,b) returns the distance or None;
.get(a) returns a dict of {node: distance} entries, possibly {}."""
links = self.dict.setdefault(a, {})
if b is None: return links
else: return links.get(b)
def nodes(self):
"Return a list of nodes in the graph."
return self.dict.keys()
def UndirectedGraph(dict=None):
"Build a Graph where every edge (including future ones) goes both ways."
return Graph(dict=dict, directed=False)
def RandomGraph(nodes=xrange(10), min_links=2, width=400, height=300,
curvature=lambda: random.uniform(1.1, 1.5)):
"""Construct a random graph, with the specified nodes, and random links.
The nodes are laid out randomly on a (width x height) rectangle.
Then each node is connected to the min_links nearest neighbors.
Because inverse links are added, some nodes will have more connections.
The distance between nodes is the hypotenuse times curvature(),
where curvature() defaults to a random number between 1.1 and 1.5."""
g = UndirectedGraph()
g.locations = {}
## Build the cities
for node in nodes:
g.locations[node] = (random.randrange(width), random.randrange(height))
## Build roads from each city to at least min_links nearest neighbors.
for i in xrange(min_links):
for node in nodes:
if len(g.get(node)) < min_links:
here = g.locations[node]
def distance_to_node(n):
if n is node or g.get(node,n): return infinity
return distance(g.locations[n], here)
neighbor = argmin(nodes, distance_to_node)
d = distance(g.locations[neighbor], here) * curvature()
g.connect(node, neighbor, int(d))
return g
romania = UndirectedGraph(Dict(
A=Dict(Z=75, S=140, T=118),
B=Dict(U=85, P=101, G=90, F=211),
C=Dict(D=120, R=146, P=138),
D=Dict(M=75),
E=Dict(H=86),
F=Dict(S=99),
H=Dict(U=98),
I=Dict(V=92, N=87),
L=Dict(T=111, M=70),
O=Dict(Z=71, S=151),
P=Dict(R=97),
R=Dict(S=80),
U=Dict(V=142)))
romania.locations = Dict(
A=( 91, 492), B=(400, 327), C=(253, 288), D=(165, 299),
E=(562, 293), F=(305, 449), G=(375, 270), H=(534, 350),
I=(473, 506), L=(165, 379), M=(168, 339), N=(406, 537),
O=(131, 571), P=(320, 368), R=(233, 410), S=(207, 457),
T=( 94, 410), U=(456, 350), V=(509, 444), Z=(108, 531))
australia = UndirectedGraph(Dict(
T=Dict(),
SA=Dict(WA=1, NT=1, Q=1, NSW=1, V=1),
NT=Dict(WA=1, Q=1),
NSW=Dict(Q=1, V=1)))
australia.locations = Dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
Q=(145, 20), NSW=(145, 32), T=(145, 42), V=(145, 37))
class GraphProblem(Problem):
"The problem of searching a graph from one node to another."
def __init__(self, initial, goal, graph):
Problem.__init__(self, initial, goal)
self.graph = graph
def successor(self, A):
"Return a list of (action, result) pairs."
return [(B, B) for B in self.graph.get(A).keys()]
def path_cost(self, cost_so_far, A, action, B):
return cost_so_far + (self.graph.get(A,B) or infinity)
def h(self, node):
"h function is straight-line distance from a node's state to goal."
locs = getattr(self.graph, 'locations', None)
if locs:
return int(distance(locs[node.state], locs[self.goal]))
else:
return infinity
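# Illustrative sketch (not part of the original module): running A* on the
# Romania map defined above. The returned Node carries the whole solution via
# its path() method.
def _example_astar_romania():
    "Hypothetical demo: shortest route from Arad (A) to Bucharest (B)."
    goal_node = astar_search(GraphProblem('A', 'B', romania))
    # path() runs from the goal node back to the root, so reverse it to read
    # the route forwards; goal_node.path_cost is the total route length.
    return [node.state for node in reversed(goal_node.path())]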
#______________________________________________________________________________
#### NOTE: NQueensProblem not working properly yet.
class NQueensProblem(Problem):
"""The problem of placing N queens on an NxN board with none attacking
    each other. A state is represented as an N-element array, where a
    value of r in the c-th entry means there is a queen at column c,
    row r, and a value of None means that the c-th column has not been
    filled in yet. We fill in columns left to right."""
def __init__(self, N):
self.N = N
self.initial = [None] * N
def successor(self, state):
"In the leftmost empty column, try all non-conflicting rows."
if state[-1] is not None:
return [] ## All columns filled; no successors
else:
def place(col, row):
new = state[:]
new[col] = row
return new
col = state.index(None)
return [(row, place(col, row)) for row in xrange(self.N)
if not self.conflicted(state, row, col)]
def conflicted(self, state, row, col):
"Would placing a queen at (row, col) conflict with anything?"
        for c in xrange(col):
if self.conflict(row, col, state[c], c):
return True
return False
def conflict(self, row1, col1, row2, col2):
"Would putting two queens in (row1, col1) and (row2, col2) conflict?"
return (row1 == row2 ## same row
or col1 == col2 ## same column
or row1-col1 == row2-col2 ## same \ diagonal
or row1+col1 == row2+col2) ## same / diagonal
def goal_test(self, state):
"Check if all columns filled, no conflicts."
if state[-1] is None:
return False
for c in xrange(len(state)):
if self.conflicted(state, state[c], c):
return False
return True
#______________________________________________________________________________
## Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
## iterative-repair and related search techniques, as suggested by Justin Boyan.
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']
def random_boggle(n=4):
"""Return a random Boggle board of size n x n.
We represent a board as a linear list of letters."""
cubes = [cubes16[i % 16] for i in xrange(n*n)]
random.shuffle(cubes)
return map(random.choice, cubes)
## The best 5x5 board found by Boyan, with our word list this board scores
## 2274 words, for a score of 9837
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')
def print_boggle(board):
"Print the board in a 2-d array."
n2 = len(board); n = exact_sqrt(n2)
for i in xrange(n2):
if i % n == 0: print
if board[i] == 'Q': print 'Qu',
else: print str(board[i]) + ' ',
print
def boggle_neighbors(n2, cache={}):
""""Return a list of lists, where the i-th element is the list of indexes
for the neighbors of square i."""
if cache.get(n2):
return cache.get(n2)
n = exact_sqrt(n2)
neighbors = [None] * n2
for i in xrange(n2):
neighbors[i] = []
on_top = i < n
on_bottom = i >= n2 - n
on_left = i % n == 0
on_right = (i+1) % n == 0
if not on_top:
neighbors[i].append(i - n)
if not on_left: neighbors[i].append(i - n - 1)
if not on_right: neighbors[i].append(i - n + 1)
if not on_bottom:
neighbors[i].append(i + n)
if not on_left: neighbors[i].append(i + n - 1)
if not on_right: neighbors[i].append(i + n + 1)
if not on_left: neighbors[i].append(i - 1)
if not on_right: neighbors[i].append(i + 1)
cache[n2] = neighbors
return neighbors
def exact_sqrt(n2):
"If n2 is a perfect square, return its square root, else raise error."
n = int(math.sqrt(n2))
assert n * n == n2
return n
##_____________________________________________________________________________
class Wordlist:
"""This class holds a list of words. You can use (word in wordlist)
to check if a word is in the list, or wordlist.lookup(prefix)
to see if prefix starts any of the words in the list."""
def __init__(self, filename, min_len=3):
lines = open(filename).read().upper().split()
self.words = [word for word in lines if len(word) >= min_len]
self.words.sort()
self.bounds = {}
for c in ALPHABET:
c2 = chr(ord(c) + 1)
self.bounds[c] = (bisect.bisect(self.words, c),
bisect.bisect(self.words, c2))
def lookup(self, prefix, lo=0, hi=None):
"""See if prefix is in dictionary, as a full word or as a prefix.
Return two values: the first is the lowest i such that
words[i].startswith(prefix), or is None; the second is
True iff prefix itself is in the Wordlist."""
words = self.words
i = bisect.bisect_left(words, prefix, lo, hi)
if i < len(words) and words[i].startswith(prefix):
return i, (words[i] == prefix)
else:
return None, False
def __contains__(self, word):
return self.words[bisect.bisect_left(self.words, word)] == word
def __len__(self):
return len(self.words)
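# Illustrative sketch (not part of the original module): how the two return
# values of Wordlist.lookup are meant to be read. The filename default is the
# same placeholder path used by BoggleFinder below, and the example assumes
# the file exists and contains HELLO.
def _example_wordlist_lookup(filename="../data/wordlist"):
    "Hypothetical demo of membership tests vs. prefix lookups."
    wl = Wordlist(filename)
    in_list = 'HELLO' in wl         # full-word membership
    i, is_word = wl.lookup('HELL')  # i is the index of the first word with
                                    # this prefix (or None); is_word says
                                    # whether 'HELL' itself is in the list
    return in_list, i, is_word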
##_____________________________________________________________________________
class BoggleFinder:
"""A class that allows you to find all the words in a Boggle board. """
wordlist = None ## A class variable, holding a wordlist
def __init__(self, board=None):
if BoggleFinder.wordlist is None:
BoggleFinder.wordlist = Wordlist("../data/wordlist")
self.found = {}
if board:
self.set_board(board)
def set_board(self, board=None):
"Set the board, and find all the words in it."
if board is None:
board = random_boggle()
self.board = board
self.neighbors = boggle_neighbors(len(board))
self.found = {}
for i in xrange(len(board)):
lo, hi = self.wordlist.bounds[board[i]]
self.find(lo, hi, i, [], '')
return self
def find(self, lo, hi, i, visited, prefix):
"""Looking in square i, find the words that continue the prefix,
considering the entries in self.wordlist.words[lo:hi], and not
revisiting the squares in visited."""
if i in visited:
return
wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
if wordpos is not None:
if is_word:
self.found[prefix] = True
visited.append(i)
c = self.board[i]
if c == 'Q': c = 'QU'
prefix += c
for j in self.neighbors[i]:
self.find(wordpos, hi, j, visited, prefix)
visited.pop()
def words(self):
"The words found."
return self.found.keys()
scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100
def score(self):
"The total score for the words found, according to the rules."
return sum([self.scores[len(w)] for w in self.words()])
def __len__(self):
"The number of words found."
return len(self.found)
##_____________________________________________________________________________
def boggle_hill_climbing(board=None, ntimes=100, print_it=True):
"""Solve inverse Boggle by hill-climbing: find a high-scoring board by
starting with a random one and changing it."""
finder = BoggleFinder()
if board is None:
board = random_boggle()
best = len(finder.set_board(board))
for _ in xrange(ntimes):
i, oldc = mutate_boggle(board)
new = len(finder.set_board(board))
if new > best:
best = new
print best, _, board
else:
board[i] = oldc ## Change back
if print_it:
print_boggle(board)
return board, best
def mutate_boggle(board):
i = random.randrange(len(board))
oldc = board[i]
board[i] = random.choice(random.choice(cubes16)) ##random.choice(boyan_best)
return i, oldc
#______________________________________________________________________________
## Code to compare searchers on various problems.
class InstrumentedProblem(Problem):
"""Delegates to a problem, and keeps statistics."""
def __init__(self, problem):
self.problem = problem
self.succs = self.goal_tests = self.states = 0
self.found = None
def successor(self, state):
"Return a list of (action, state) pairs reachable from this state."
result = self.problem.successor(state)
self.succs += 1; self.states += len(result)
return result
def goal_test(self, state):
"Return true if the state is a goal."
self.goal_tests += 1
result = self.problem.goal_test(state)
if result:
self.found = state
return result
def __getattr__(self, attr):
if attr in ('succs', 'goal_tests', 'states'):
return self.__dict__[attr]
else:
return getattr(self.problem, attr)
def __repr__(self):
return '<%4d/%4d/%4d/%s>' % (self.succs, self.goal_tests,
self.states, str(self.found)[0:4])
def compare_searchers(problems, header, searchers=[breadth_first_tree_search,
breadth_first_graph_search, depth_first_graph_search,
iterative_deepening_search, depth_limited_search,
astar_search]):
def do(searcher, problem):
p = InstrumentedProblem(problem)
searcher(p)
return p
table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
print_table(table, header)
def compare_graph_searchers():
compare_searchers(problems=[GraphProblem('A', 'B', romania),
GraphProblem('O', 'N', romania),
GraphProblem('Q', 'WA', australia)],
header=['Searcher', 'Romania(A,B)', 'Romania(O, N)', 'Australia'])
|
|
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for the `station_plot` module."""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
from metpy.plots import nws_layout, simple_layout, StationPlot, StationPlotLayout
from metpy.plots.wx_symbols import high_clouds, sky_cover
# Fixtures to make sure we have the right backend and consistent round
from metpy.testing import patch_round, set_agg_backend # noqa: F401
from metpy.units import units
MPL_VERSION = matplotlib.__version__[:3]
@pytest.mark.mpl_image_compare(tolerance={'1.5': 0.04625, '1.4': 4.1}.get(MPL_VERSION, 0.0033),
savefig_kwargs={'dpi': 300}, remove_text=True)
def test_stationplot_api():
"""Test the StationPlot API."""
fig = plt.figure(figsize=(9, 9))
# testing data
x = np.array([1, 5])
y = np.array([2, 4])
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=16)
sp.plot_barb([20, 0], [0, -50])
sp.plot_text('E', ['KOKC', 'ICT'], color='blue')
sp.plot_parameter('NW', [10.5, 15], color='red')
sp.plot_symbol('S', [5, 7], high_clouds, color='green')
sp.ax.set_xlim(0, 6)
sp.ax.set_ylim(0, 6)
return fig
@pytest.mark.mpl_image_compare(tolerance={'1.5': 0.05974, '1.4': 3.7}.get(MPL_VERSION, 0.0033),
savefig_kwargs={'dpi': 300}, remove_text=True)
def test_station_plot_replace():
"""Test that locations are properly replaced."""
fig = plt.figure(figsize=(3, 3))
# testing data
x = np.array([1])
y = np.array([1])
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=16)
sp.plot_barb([20], [0])
sp.plot_barb([5], [0])
sp.plot_parameter('NW', [10.5], color='red')
sp.plot_parameter('NW', [20], color='blue')
sp.ax.set_xlim(-3, 3)
sp.ax.set_ylim(-3, 3)
return fig
@pytest.mark.mpl_image_compare(tolerance={'1.5': 0.036, '1.4': 2.02}.get(MPL_VERSION, 0.00321),
savefig_kwargs={'dpi': 300}, remove_text=True)
def test_stationlayout_api():
"""Test the StationPlot API."""
fig = plt.figure(figsize=(9, 9))
# testing data
x = np.array([1, 5])
y = np.array([2, 4])
data = dict()
data['temp'] = np.array([32., 212.]) * units.degF
data['u'] = np.array([2, 0]) * units.knots
data['v'] = np.array([0, 5]) * units.knots
data['stid'] = ['KDEN', 'KSHV']
data['cover'] = [3, 8]
# Set up the layout
layout = StationPlotLayout()
layout.add_barb('u', 'v', units='knots')
layout.add_value('NW', 'temp', fmt='0.1f', units=units.degC, color='darkred')
layout.add_symbol('C', 'cover', sky_cover, color='magenta')
layout.add_text((0, 2), 'stid', color='darkgrey')
layout.add_value('NE', 'dewpt', color='green') # This should be ignored
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=12)
layout.plot(sp, data)
sp.ax.set_xlim(0, 6)
sp.ax.set_ylim(0, 6)
return fig
def test_station_layout_odd_data():
"""Test more corner cases with data passed in."""
fig = plt.figure(figsize=(9, 9))
# Set up test layout
layout = StationPlotLayout()
layout.add_barb('u', 'v')
layout.add_value('W', 'temperature', units='degF')
# Now only use data without wind and no units
data = dict(temperature=[25.])
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), [1], [2], fontsize=12)
layout.plot(sp, data)
assert True
def test_station_layout_replace():
"""Test that layout locations are replaced."""
layout = StationPlotLayout()
layout.add_text('E', 'temperature')
layout.add_value('E', 'dewpoint')
assert 'E' in layout
assert layout['E'][0] is StationPlotLayout.PlotTypes.value
assert layout['E'][1] == 'dewpoint'
def test_station_layout_names():
"""Test getting station layout names."""
layout = StationPlotLayout()
layout.add_barb('u', 'v')
layout.add_text('E', 'stid')
layout.add_value('W', 'temp')
layout.add_symbol('C', 'cover', lambda x: x)
assert sorted(layout.names()) == ['cover', 'stid', 'temp', 'u', 'v']
@pytest.mark.mpl_image_compare(tolerance={'1.5': 0.05447, '1.4': 3.0}.get(MPL_VERSION, 0.0039),
savefig_kwargs={'dpi': 300}, remove_text=True)
def test_simple_layout():
"""Test metpy's simple layout for station plots."""
fig = plt.figure(figsize=(9, 9))
# testing data
x = np.array([1, 5])
y = np.array([2, 4])
data = dict()
data['air_temperature'] = np.array([32., 212.]) * units.degF
data['dew_point_temperature'] = np.array([28., 80.]) * units.degF
data['air_pressure_at_sea_level'] = np.array([29.92, 28.00]) * units.inHg
data['eastward_wind'] = np.array([2, 0]) * units.knots
data['northward_wind'] = np.array([0, 5]) * units.knots
data['cloud_coverage'] = [3, 8]
data['present_weather'] = [65, 75]
data['unused'] = [1, 2]
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=12)
simple_layout.plot(sp, data)
sp.ax.set_xlim(0, 6)
sp.ax.set_ylim(0, 6)
return fig
@pytest.mark.mpl_image_compare(tolerance={'1.5': 0.1474, '1.4': 7.02}.get(MPL_VERSION, 0.0113),
savefig_kwargs={'dpi': 300}, remove_text=True)
def test_nws_layout():
"""Test metpy's NWS layout for station plots."""
fig = plt.figure(figsize=(3, 3))
# testing data
x = np.array([1])
y = np.array([2])
data = dict()
data['air_temperature'] = np.array([77]) * units.degF
data['dew_point_temperature'] = np.array([71]) * units.degF
data['air_pressure_at_sea_level'] = np.array([999.8]) * units('mbar')
data['eastward_wind'] = np.array([15.]) * units.knots
data['northward_wind'] = np.array([15.]) * units.knots
data['cloud_coverage'] = [7]
data['present_weather'] = [80]
data['high_cloud_type'] = [1]
data['medium_cloud_type'] = [3]
data['low_cloud_type'] = [2]
data['visibility_in_air'] = np.array([5.]) * units.mile
data['tendency_of_air_pressure'] = np.array([-0.3]) * units('mbar')
data['tendency_of_air_pressure_symbol'] = [8]
# Make the plot
sp = StationPlot(fig.add_subplot(1, 1, 1), x, y, fontsize=12, spacing=16)
nws_layout.plot(sp, data)
sp.ax.set_xlim(0, 3)
sp.ax.set_ylim(0, 3)
return fig
@pytest.mark.mpl_image_compare(tolerance={'1.4': 6.68}.get(MPL_VERSION, 1.05),
remove_text=True)
def test_plot_text_fontsize():
"""Test changing fontsize in plot_text."""
fig = plt.figure(figsize=(3, 3))
ax = plt.subplot(1, 1, 1)
# testing data
x = np.array([1])
y = np.array([2])
# Make the plot
sp = StationPlot(ax, x, y, fontsize=36)
sp.plot_text('NW', ['72'], fontsize=24)
sp.plot_text('SW', ['60'], fontsize=4)
sp.ax.set_xlim(0, 3)
sp.ax.set_ylim(0, 3)
return fig
def test_layout_str():
"""Test layout string representation."""
layout = StationPlotLayout()
layout.add_barb('u', 'v')
layout.add_text('E', 'stid')
layout.add_value('W', 'temp')
layout.add_symbol('C', 'cover', lambda x: x)
assert str(layout) == ('{C: (symbol, cover, ...), E: (text, stid, ...), '
'W: (value, temp, ...), barb: (barb, (\'u\', \'v\'), ...)}')
|
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant .realigner.realigner."""
import csv
import itertools
import os
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
from deepvariant import testdata
from deepvariant.protos import realigner_pb2
from deepvariant.realigner import realigner
from deepvariant.realigner import utils
from third_party.nucleus.io import fasta
from third_party.nucleus.io import sam
from third_party.nucleus.protos import reads_pb2
from third_party.nucleus.testing import test_utils
from third_party.nucleus.util import cigar as cigar_utils
from third_party.nucleus.util import ranges
FLAGS = flags.FLAGS
def setUpModule():
testdata.init()
def _get_reads(region):
with sam.SamReader(testdata.CHR20_BAM) as in_sam_reader:
return list(in_sam_reader.query(region))
def _get_reads_and_header(region):
with sam.SamReader(testdata.CHR20_BAM) as in_sam_reader:
return list(in_sam_reader.query(region)), in_sam_reader.header
def _test_assembled_region(region_str, haplotypes=None):
return realigner.AssemblyRegion(
realigner_pb2.CandidateHaplotypes(
span=ranges.parse_literal(region_str), haplotypes=haplotypes or []))
class ReadAssignmentTests(parameterized.TestCase):
def setUp(self):
reads = [
test_utils.make_read('ACG', start=1, cigar='3M', name='read1'),
test_utils.make_read('ACG', start=6, cigar='3M', name='read2'),
test_utils.make_read('ACG', start=9, cigar='3M', name='read3'),
test_utils.make_read('ACG', start=28, cigar='3M', name='read4'),
test_utils.make_read('A' * 10, start=3, cigar='10M', name='read5'),
]
self.reads = {read.fragment_name: read for read in reads}
self.regions = {
'r1': _test_assembled_region('chr1:1-5'),
'r2': _test_assembled_region('chr1:10-15'),
'r3': _test_assembled_region('chr1:20-30'),
}
self.assembled_regions = [self.regions[r] for r in sorted(self.regions)]
def get_reads_by_name(self, names):
return [self.reads[name] for name in names]
def test_construction(self):
aregion = _test_assembled_region('chr1:1-5', haplotypes=['A', 'C'])
self.assertEqual(aregion.region, ranges.parse_literal('chr1:1-5'))
self.assertEqual(aregion.haplotypes, ['A', 'C'])
self.assertEqual(aregion.reads, [])
def test_adding_reads(self):
aregion = _test_assembled_region('chr1:3-15')
# We haven't added any reads, so reads is empty and the span is None.
self.assertEqual(aregion.reads, [])
self.assertIsNone(aregion.read_span)
# Add read2, giving us a real read span and a read in our region's reads.
read_to_add = self.get_reads_by_name(['read2'])[0]
expected_reads = [read_to_add]
aregion.add_read(read_to_add)
self.assertEqual(aregion.reads, expected_reads)
self.assertEqual(aregion.read_span, ranges.parse_literal('chr1:7-9'))
# Add read1, increasing the span on the left.
read_to_add = self.get_reads_by_name(['read1'])[0]
expected_reads += [read_to_add]
aregion.add_read(read_to_add)
self.assertEqual(aregion.reads, expected_reads)
self.assertEqual(aregion.read_span, ranges.parse_literal('chr1:2-9'))
# Finally, add in all of the reads.
reads_to_add = self.get_reads_by_name(['read3', 'read4', 'read5'])
expected_reads += reads_to_add
for read in reads_to_add:
aregion.add_read(read)
self.assertEqual(aregion.reads, expected_reads)
self.assertEqual(aregion.read_span, ranges.parse_literal('chr1:2-31'))
@parameterized.parameters(
# Single read tests.
# read1 overlaps r1.
dict(read_name='read1', expected_region='r1'),
# read2 falls between r1 and r2, should be unassigned.
dict(read_name='read2', expected_region=None),
# read3 starts before r2 but overlaps it.
dict(read_name='read3', expected_region='r2'),
# read4 starts in r3 but extends beyond it.
dict(read_name='read4', expected_region='r3'),
      # read5 overlaps r1 and r2 but is more in r2 than r1.
dict(read_name='read5', expected_region='r2'),
)
def test_assign_reads_to_assembled_regions_single_read(
self, read_name, expected_region):
assignment = {expected_region: [read_name]} if expected_region else {}
self.assertReadsGoToCorrectRegions(
reads=self.get_reads_by_name([read_name]),
expected_assignments=assignment)
@parameterized.parameters(
# Let's make sure adding all of the reads together results in the correct
# assignment across all regions.
dict(
read_names=names,
expected_assignments={
'r1': ['read1'],
'r2': ['read3', 'read5'],
'r3': ['read4'],
}) for names in itertools.permutations(
['read1', 'read2', 'read3', 'read4', 'read5']))
def test_assign_reads_to_assembled_regions_multiple_reads(
self, read_names, expected_assignments):
self.assertReadsGoToCorrectRegions(
self.get_reads_by_name(read_names), expected_assignments)
def assertReadsGoToCorrectRegions(self, reads, expected_assignments):
unassigned = realigner.assign_reads_to_assembled_regions(
self.assembled_regions, reads)
# Every read should be in the assembled regions or unassigned.
six.assertCountEqual(
self,
[r for ar in self.assembled_regions for r in ar.reads] + unassigned,
reads)
# Go through each region and make sure the reads that are supposed to
    # appear in each region do appear there.
for region_name, region in self.regions.items():
expected_reads = self.get_reads_by_name(
expected_assignments.get(region_name, []))
six.assertCountEqual(self, region.reads, expected_reads)
class RealignerTest(parameterized.TestCase):
def setUp(self):
self.ref_reader = fasta.IndexedFastaReader(testdata.CHR20_FASTA)
# redacted
FLAGS.ws_use_window_selector_model = True
self.config = realigner.realigner_config(FLAGS)
self.reads_realigner = realigner.Realigner(self.config, self.ref_reader)
@parameterized.parameters(
# Arguments passed by ws_{min,max}_supporting_reads.
dict(
model=None, min_supporting=2, max_supporting=300, use_ws_model=False),
# No flags passed for the window_selection.
dict(
model=None, min_supporting=-1, max_supporting=-1, use_ws_model=False),
# VariantReadsThresholdModel.
dict(
model='VARIANT_READS_THRESHOLD',
min_supporting=-1,
max_supporting=-1,
use_ws_model=True),
# AlleleCountLinearModel.
dict(
model='ALLELE_COUNT_LINEAR',
min_supporting=-1,
max_supporting=-1,
use_ws_model=True),
# Use the default AlleleCountLinearModel.
dict(model=None, min_supporting=-1, max_supporting=-1, use_ws_model=True))
@flagsaver.flagsaver
def test_window_selector_model_flags(self, model, min_supporting,
max_supporting, use_ws_model):
# This indirection is needed because the symbols in testdata are not set
# when the @parameterized decorator is called.
symbol_to_testdata = {
None: None,
'VARIANT_READS_THRESHOLD': testdata.WS_VARIANT_READS_THRESHOLD_MODEL,
'ALLELE_COUNT_LINEAR': testdata.WS_ALLELE_COUNT_LINEAR_MODEL
}
FLAGS.ws_max_num_supporting_reads = max_supporting
FLAGS.ws_min_num_supporting_reads = min_supporting
FLAGS.ws_window_selector_model = symbol_to_testdata[model]
FLAGS.ws_use_window_selector_model = use_ws_model
# We only make sure that reading the model does not crash or raise
# exceptions.
_ = realigner.realigner_config(FLAGS)
@flagsaver.flagsaver
def test_window_selector_model_flags_failures(self):
with six.assertRaisesRegex(
self, ValueError, 'ws_min_supporting_reads should be smaller than ws_'
'max_supporting_reads.'):
FLAGS.ws_max_num_supporting_reads = 1
FLAGS.ws_min_num_supporting_reads = 2
FLAGS.ws_window_selector_model = None
FLAGS.ws_use_window_selector_model = False
_ = realigner.realigner_config(FLAGS)
with six.assertRaisesRegex(
self, ValueError, 'Cannot specify a ws_window_selector_model '
'if ws_use_window_selector_model is False.'):
FLAGS.ws_max_num_supporting_reads = -1
FLAGS.ws_min_num_supporting_reads = -1
FLAGS.ws_window_selector_model = testdata.WS_ALLELE_COUNT_LINEAR_MODEL
FLAGS.ws_use_window_selector_model = False
_ = realigner.realigner_config(FLAGS)
with six.assertRaisesRegex(
self, ValueError, 'Cannot use both ws_min_num_supporting_reads and '
'ws_use_window_selector_model flags.'):
FLAGS.ws_max_num_supporting_reads = -1
FLAGS.ws_min_num_supporting_reads = 1
FLAGS.ws_window_selector_model = None
FLAGS.ws_use_window_selector_model = True
_ = realigner.realigner_config(FLAGS)
with six.assertRaisesRegex(
self, ValueError, 'Cannot use both ws_max_num_supporting_reads and '
'ws_use_window_selector_model flags.'):
FLAGS.ws_max_num_supporting_reads = 1
FLAGS.ws_min_num_supporting_reads = -1
FLAGS.ws_window_selector_model = None
FLAGS.ws_use_window_selector_model = True
_ = realigner.realigner_config(FLAGS)
@parameterized.parameters(
dict(
region_literal='chr20:10,095,379-10,095,500',
expected_window_literal='chr20:10,095,352-10,095,553',
expected_haplotypes={
'TAGTGATCTAGTCCTTTTTGTTGTGCAAAAGGAAGTGCTAAAATCAGAATGAGAACCATGGTCA'
'CCTGACATAGACACAAGTGATGATGATGATGATGATGATGATGATGATGATGATATCCATGTTC'
'AAGTACTAATTCTGGGCAAGACACTGTTCTAAGTGCTATGAATATATTACCTCATTTAATCATC'
'T',
'TAGTGATCTAGTCCTTTTTGTTGTGCAAAAGGAAGTGCTAAAATCAGAATGAGAACCATGGTCA'
'CCTGACATAGACACAAGTGATGATGATGATGATGATGATGATGATGATGATGATGATGATGATA'
'TCCATGTTCAAGTACTAATTCTGGGCAAGACACTGTTCTAAGTGCTATGAATATATTACCTCAT'
'TTAATCATCT'
},
comment='There is a heterozygous 9 bp deletion of tandem TGA repeat.'
),
dict(
region_literal='chr20:10,046,080-10,046,307',
expected_window_literal='chr20:10,046,096-10,046,267',
expected_haplotypes={
'CCCAAAAAAAGAGTTAGGGATGCTGGAAAGGCAGAAAGAAAAGGGAAGGGAAGAGGAAGGGGAA'
'AAGGAAAGAAAAAAAAGAAAGAAAGAAAGAGAAAGAAAGAGAAAGAGAAAGAAAGAGGAAAGAG'
'AGAAAGAGAAAGAGAAGGAAAGAGAAAGAAAGAGAAGGAAAGAG',
'CCCAAAAAAAGAGTTAGGGATGCTGGAAAGGCAGAAAGAAAAGGGAAGGGAAGAGGAAGGGGAA'
'AAGGAAAGAAAAAAAAGAAAGAAAGAAAGAGAAAGAGAAAGAAAGAGGAAAGAGAGAAAGAGAA'
'AGAGAAGGAAAGAGAAAGAAAGAGAAGGAAAGAG'
},
comment='There is a heterozygous 10 bp deletion.'),
)
def test_realigner_example_region(self, region_literal,
expected_window_literal,
expected_haplotypes, comment):
region = ranges.parse_literal(region_literal)
reads = _get_reads(region)
windows_haplotypes, realigned_reads = self.reads_realigner.realign_reads(
reads, region)
self.assertEqual(len(reads), len(realigned_reads))
self.assertEqual(
ranges.parse_literal(expected_window_literal),
windows_haplotypes[0].span)
self.assertEqual(expected_haplotypes, set(windows_haplotypes[0].haplotypes))
@parameterized.parameters(
[
dict(
region_literal='chr20:10,046,080-10,046,307',
variant_literal='chr20:10,046,179-10,046,188')
],)
def test_realigner_example_variant(self, region_literal, variant_literal):
"""All overlapping reads should include 10bp deletion at chr20:10046178."""
region = ranges.parse_literal(region_literal)
variant = ranges.parse_literal(variant_literal)
reads = _get_reads(region)
_, realigned_reads = self.reads_realigner.realign_reads(reads, region)
for read in realigned_reads:
has_variant = False
self.assertTrue(read.HasField('alignment'))
self.assertEqual(variant.reference_name,
read.alignment.position.reference_name)
ref_pos = read.alignment.position.position
for cigar in read.alignment.cigar:
self.assertIn(cigar.operation, utils.CIGAR_OPS)
if cigar.operation in utils.CIGAR_ALIGN_OPS:
ref_pos += cigar.operation_length
elif cigar.operation in utils.CIGAR_DELETE_OPS:
if (ref_pos == variant.start and
cigar.operation_length == variant.end - ref_pos):
has_variant = True
ref_pos += cigar.operation_length
if (read.alignment.position.position <= variant.start and
ref_pos >= variant.end):
self.assertTrue(has_variant)
def test_realigner_doesnt_create_invalid_intervals(self):
"""Tests that read sets don't result in a crash in reference_fai.cc."""
region = ranges.parse_literal('chr20:63,025,320-63,025,520')
# pylint: disable=g-complex-comprehension
reads = [
test_utils.make_read(
'ACCGT' * 50,
start=63025520 - 250,
cigar='250M',
quals=list(np.tile(range(30, 35), 50))) for _ in range(20)
]
# pylint: enable=g-complex-comprehension
self.reads_realigner.realign_reads(reads, region)
# These reads are aligned off the edge of the contig. Note that the
# reference bases in this interval are all Ns as well.
# pylint: disable=g-complex-comprehension
reads = [
test_utils.make_read(
'TTATA' * 50,
start=63025520 - 200,
cigar='200M50S',
quals=list(np.tile(range(30, 35), 50))) for _ in range(20)
]
# pylint: enable=g-complex-comprehension
self.reads_realigner.realign_reads(reads, region)
@parameterized.parameters(
dict(enabled=False, emit_reads=False),
dict(enabled=True, emit_reads=False),
dict(enabled=True, emit_reads=True),
)
def test_realigner_diagnostics(self, enabled, emit_reads):
# Make sure that by default we aren't emitting any diagnostic outputs.
dx_dir = test_utils.test_tmpfile('dx_enabled{}_emitreads_{}'.format(
enabled, emit_reads))
region_str = 'chr20:10046178-10046188'
region = ranges.parse_literal(region_str)
assembled_region_str = 'chr20:10046096-10046267'
reads, header = _get_reads_and_header(region)
self.config = realigner.realigner_config(FLAGS)
self.config.diagnostics.enabled = enabled
self.config.diagnostics.output_root = dx_dir
self.config.diagnostics.emit_realigned_reads = emit_reads
self.reads_realigner = realigner.Realigner(self.config, self.ref_reader,
header)
_, _ = self.reads_realigner.realign_reads(reads, region)
self.reads_realigner.diagnostic_logger.close() # Force close all resources.
if not enabled:
# Make sure our diagnostic output isn't emitted.
self.assertFalse(tf.io.gfile.exists(dx_dir))
else:
# Our root directory exists.
self.assertTrue(tf.io.gfile.isdir(dx_dir))
# We expect a realigner_metrics.csv in our rootdir with 1 entry in it.
metrics_file = os.path.join(
dx_dir, self.reads_realigner.diagnostic_logger.metrics_filename)
self.assertTrue(tf.io.gfile.exists(metrics_file))
with tf.io.gfile.GFile(metrics_file) as fin:
rows = list(csv.DictReader(fin))
self.assertLen(rows, 1)
self.assertEqual(
set(rows[0].keys()), {'window', 'k', 'n_haplotypes', 'time'})
self.assertEqual(rows[0]['window'], assembled_region_str)
self.assertEqual(int(rows[0]['k']), 25)
      self.assertEqual(int(rows[0]['n_haplotypes']), 2)
# Check that our runtime is reasonable (greater than 0, less than 10 s).
self.assertTrue(0.0 < float(rows[0]['time']) < 10.0)
# As does the subdirectory for this region.
region_subdir = os.path.join(dx_dir, assembled_region_str)
self.assertTrue(tf.io.gfile.isdir(region_subdir))
# We always have a graph.dot
self.assertTrue(
tf.io.gfile.exists(
os.path.join(
region_subdir,
self.reads_realigner.diagnostic_logger.graph_filename)))
reads_file = os.path.join(
dx_dir, region_str,
self.reads_realigner.diagnostic_logger.realigned_reads_filename)
    # If emit_reads is False, the file should not exist, and vice versa.
self.assertEqual(emit_reads, tf.io.gfile.exists(reads_file))
@parameterized.parameters(
dict(
read_seq='AAGGAAGTGCTAAAATCAGAATGAGAACCATGG'
'ATCCATGTTCAAGTACTAATTCTGGGC',
prefix='AGTGATCTAGTCCTTTTTGTTGTGCAAAAGGAAGTGCTAAAATCAGAATGAGAACCATGG',
suffix='ATCCATGTTCAAGTACTAATTCTGGGCAAGACACTGTTCTAAGTGCTATGAATATATTACC',
haplotypes=['CATCATCAT', ''],
expected_cigars=['33M9D27M', '60M']),
dict(
read_seq='TTGCCCGGGCATAAGGTGTTTCGGAGAAGCCTAG'
'TATATATA'
'CTCCGGTTTTTAAGTAGGGTCGTAGCAG',
prefix='AACGGGTCTACAAGTCTCTGCGTGTTGCCCGGGCATAAGGTGTTTCGGAGAAGCCTAG',
suffix='CTCCGGTTTTTAAGTAGGGTCGTAGCAGCAAAGTAAGAGTGGAACGCGTGGGCGACTA',
haplotypes=['', 'TATATATA'],
expected_cigars=['34M8I28M', '70M']),
dict(
read_seq='AAAAAAAAAAGGGGGGGGGGATTTTTTTTTTTTTCCCCCCCCCCCCCCC',
prefix='AAAAAAAAAAGGGGGGGGGG',
suffix='TTTTTTTTTTTTTCCCCCCCCCCCCCCC',
haplotypes=['A', ''],
expected_cigars=['49M', '20M1I28M']),
)
def test_align_to_haplotype(self, read_seq, prefix, suffix, haplotypes,
expected_cigars):
test_read = test_utils.make_read(read_seq, start=1)
reads = [test_read]
# Align to each haplotype in turn.
for i in range(len(haplotypes)):
aligned_reads = self.reads_realigner.align_to_haplotype(
haplotypes[i], haplotypes, prefix, suffix, reads, 'test', 1)
self.assertEqual(len(reads), len(aligned_reads))
self.assertEqual(
cigar_utils.format_cigar_units(aligned_reads[0].alignment.cigar),
expected_cigars[i])
@parameterized.parameters(
dict(alt_allele='CATTACA', ref_buffer_length=70, read_buffer_length=20),
dict(alt_allele='CATTACA', ref_buffer_length=20, read_buffer_length=20),
dict(alt_allele='G', ref_buffer_length=70, read_buffer_length=20),
# At or below read_buffer_length=15 the reads start to come back
# unaligned, but this depends on the specific ref and alt alleles, so
# this does not include exhaustive tests for how low these values can go.
)
def test_align_to_haplotype_stress_tests(self, alt_allele, ref_buffer_length,
read_buffer_length):
"""Testing what happens when read and reference sequences are shorter."""
    # Start with a long prefix and suffix to enable cutting them down as needed.
whole_prefix = 'AGTGATCTAGTCCTTTTTGTTGTGCAAAAGGAAGTGCTAAAATCAGAATGAGAACCATGGTCACCTGACATAGAC'
whole_suffix = 'ATCCATGTTCAAGTACTAATTCTGGGCAAGACACTGTTCTAAGTGCTATGAATATATTACCTCATTTAATCATCT'
ref_prefix = whole_prefix[-ref_buffer_length:]
ref_suffix = whole_suffix[:ref_buffer_length]
# Make two haplotypes.
ref_allele = ''
haplotypes = [ref_allele, alt_allele]
# Simulate one read from the reference and one from the alt haplotype.
read_prefix = ref_prefix[-read_buffer_length:]
read_suffix = ref_suffix[:read_buffer_length]
expected_cigars = [
# Aligning to ref haplotype: Insertion.
'{}M{}I{}M'.format(len(read_prefix), len(alt_allele), len(read_suffix)),
# Aligning to alt haplotype: All matching.
'{}M'.format(len(read_prefix) + len(alt_allele) + len(read_suffix))
]
reads = [
test_utils.make_read(read_prefix + alt_allele + read_suffix, start=1)
]
# Align to each haplotype in turn.
for i in range(len(haplotypes)):
aligned_reads = self.reads_realigner.align_to_haplotype(
haplotypes[i], haplotypes, ref_prefix, ref_suffix, reads, 'test', 1)
self.assertEqual(len(reads), len(aligned_reads))
self.assertEqual(
cigar_utils.format_cigar_units(aligned_reads[0].alignment.cigar),
expected_cigars[i])
def test_align_to_haplotype_empty_reads(self):
# Empty reads as input should return empty reads as output.
aligned_reads = self.reads_realigner.align_to_haplotype(
this_haplotype='G',
haplotypes=['G', ''],
prefix='AAA',
suffix='AAA',
reads=[],
contig='test',
ref_start=1)
self.assertEqual(aligned_reads, [])
@parameterized.parameters(
dict(
# No change.
read_seq='AAGGAAGTGCTAAAATCAGAATGAGAACCA',
cigar='30M',
expected_cigars=['30M'],
expected_sequences=['AAGGAAGTGCTAAAATCAGAATGAGAACCA'],
expected_positions=[1]),
dict(
# Basic split.
read_seq='AAGGAAGTGCTAAAATCAGAATGAGAACCA',
cigar='15M5000N15M',
expected_cigars=['15M', '15M'],
expected_sequences=['AAGGAAGTGCTAAAA', 'TCAGAATGAGAACCA'],
expected_positions=[1, 5016]),
dict(
# Split with 15bp filter.
read_seq='AAGGAAGTGCTAAAATCAGAATGAGAACCA',
cigar='10M10N20M',
expected_cigars=['20M'],
expected_sequences=['TAAAATCAGAATGAGAACCA'],
expected_positions=[21]),
dict(
# Many small splits filtered out.
read_seq='AAGGAAGTGCTAAAATCAGAATGAGAACCA',
cigar='5M5N5M5N5M5N5M5N5M5N5M',
expected_cigars=[],
expected_sequences=[],
expected_positions=[]),
dict(
# Large split.
read_seq='AAGGAAGTGCTAAAATCAGAATGAGAACCA',
cigar='2M5000N28M',
expected_cigars=['28M'],
expected_sequences=['GGAAGTGCTAAAATCAGAATGAGAACCA'],
expected_positions=[5003]),
dict(
# Insertion.
read_seq='AAGGAAGTGCTAATTTTTAATCAGAATGAGAACCA',
cigar='15M5I15M',
expected_cigars=['15M5I15M'],
expected_sequences=['AAGGAAGTGCTAATTTTTAATCAGAATGAGAACCA'],
expected_positions=[1]),
dict(
# Insertion + Split.
read_seq='AAGGAAGTGCTAAAAGGGGGTCAGAATGAGAACCA',
cigar='15M5I50N15M',
expected_cigars=['15M5I', '15M'],
expected_sequences=['AAGGAAGTGCTAAAAGGGGG', 'TCAGAATGAGAACCA'],
expected_positions=[1, 66]),
dict(
# Deletion.
read_seq='AAGGAAGTGCTAATTTTTAATCAGAATGAGAACCA',
cigar='15M5D15M',
expected_cigars=['15M5D15M'],
expected_sequences=['AAGGAAGTGCTAATTTTTAATCAGAATGAGAACCA'],
expected_positions=[1]),
dict(
# Deletion + Split.
read_seq='AAGGAAGTGCTAATTTCAGAATGAGAACCA',
cigar='15M5D50N15M',
expected_cigars=['15M5D', '15M'],
expected_sequences=['AAGGAAGTGCTAATT', 'TCAGAATGAGAACCA'],
expected_positions=[1, 71]),
dict(
# Sequence Match/Mismatch + Split.
read_seq='CCCCGGACACTTCTAGTTTGTCGGAGCGAGTC',
cigar='15=1X1=20N15=',
expected_cigars=['15=1X1=', '15='],
expected_sequences=['CCCCGGACACTTCTAGT', 'TTGTCGGAGCGAGTC'],
expected_positions=[1, 38]),
dict(
# Soft Clip + Split.
read_seq='TGAGCTAGTAGAATTTAGGGAGAAAGATTAATGCG',
cigar='15S5M50N15M',
expected_cigars=['15S5M', '15M'],
expected_sequences=['TGAGCTAGTAGAATTTAGGG', 'AGAAAGATTAATGCG'],
expected_positions=[1, 56]),
dict(
# Hard Clip + Split.
read_seq='ATCCCGGCCACGTTAATCCCGGCCACGTTA',
cigar='15H15M50N15M15H',
expected_cigars=['15H15M', '15M15H'],
expected_sequences=['ATCCCGGCCACGTTA', 'ATCCCGGCCACGTTA'],
expected_positions=[1, 66]),
)
def test_split_reads(self, read_seq, cigar, expected_cigars,
expected_sequences, expected_positions):
test_read = test_utils.make_read(read_seq, cigar=cigar, start=1)
reads = realigner.split_reads([test_read])
for i in range(len(reads)):
# Check sequences
self.assertEqual(reads[i].aligned_sequence, expected_sequences[i])
# Check cigars
self.assertEqual(
cigar_utils.format_cigar_units(reads[i].alignment.cigar),
expected_cigars[i])
# Check reference positions
self.assertEqual(reads[i].alignment.position.position,
expected_positions[i])
self.assertLen(reads, len(expected_sequences))
class RealignerIntegrationTest(absltest.TestCase):
def test_realigner_end2end(self):
ref_reader = fasta.IndexedFastaReader(testdata.CHR20_FASTA)
config = realigner.realigner_config(FLAGS)
reads_realigner = realigner.Realigner(config, ref_reader)
region_str = 'chr20:10,000,000-10,009,999'
windows_count = 0
regions = ranges.RangeSet.from_regions([region_str])
for region in regions.partition(1000):
with sam.SamReader(
testdata.CHR20_BAM,
read_requirements=reads_pb2.ReadRequirements()) as sam_reader:
in_reads = list(sam_reader.query(region))
windows, out_reads = reads_realigner.realign_reads(in_reads, region)
# We should always get back all of the reads we sent in. Instead of just
# checking the lengths are the same, make sure all the read names are the
# same.
six.assertCountEqual(self, [r.fragment_name for r in in_reads],
[r.fragment_name for r in out_reads])
# Check each window to make sure it's reasonable.
for window in windows:
# We always expect the reference sequence to be one of our haplotypes.
ref_seq = ref_reader.query(window.span)
self.assertIn(ref_seq, set(window.haplotypes))
windows_count += len(windows)
self.assertGreater(windows_count, 0)
class TrimTest(parameterized.TestCase):
@parameterized.parameters(
dict(
cigar='3M2D5M3I10M',
ref_trim=6,
ref_length=9,
expected_cigar='4M3I5M',
expected_read_trim=4,
expected_read_length=12,
comment='Start and end window in different match operations.'),
dict(
cigar='30M',
ref_trim=5,
ref_length=10,
expected_cigar='10M',
expected_read_trim=5,
expected_read_length=10,
comment='Start and end window in the same cigar entry'),
dict(
cigar='10D10M',
ref_trim=5,
ref_length=10,
expected_cigar='5D5M',
expected_read_trim=0,
expected_read_length=5,
comment='Start window in a deletion'),
dict(
cigar='10I10M',
ref_trim=5,
ref_length=5,
expected_cigar='5M',
expected_read_trim=15,
expected_read_length=5,
comment='Start window in an insertion'),
dict(
cigar='10M',
ref_trim=5,
ref_length=10,
expected_cigar='5M',
expected_read_trim=5,
expected_read_length=5,
comment='Read ends before the window'),
dict(
cigar='10M',
ref_trim=20,
ref_length=10,
expected_cigar='',
expected_read_trim=10,
expected_read_length=0,
comment='Read ends before the trim'),
dict(
cigar='10M20D10M',
ref_trim=12,
ref_length=5,
expected_cigar='5D',
expected_read_trim=10,
expected_read_length=0,
comment='Deletion covers the whole window'),
dict(
cigar='10M20I10M',
ref_trim=10,
ref_length=20,
expected_cigar='20I10M',
expected_read_trim=10,
expected_read_length=30,
comment='Trim to edge of an insertion'),
dict(
cigar='10M2I10M',
ref_trim=0,
ref_length=20,
expected_cigar='10M2I10M',
expected_read_trim=0,
expected_read_length=22,
comment='Zero trim'),
)
def test_trim_cigar(self, cigar, ref_trim, ref_length, expected_cigar,
expected_read_trim, expected_read_length, comment):
read = test_utils.make_read('AAAATAAAATAAAATAAAATA', start=100, cigar=cigar)
output_cigar, output_read_trim, output_read_length = realigner.trim_cigar(
read.alignment.cigar, ref_trim, ref_length)
self.assertEqual(
cigar_utils.format_cigar_units(output_cigar),
expected_cigar,
msg='Wrong cigar for: {}'.format(comment))
self.assertEqual(
output_read_trim,
expected_read_trim,
msg='Wrong read trim for: {}'.format(comment))
self.assertEqual(
output_read_length,
expected_read_length,
msg='Wrong read length for: {}'.format(comment))
self.assertEqual(
cigar_utils.format_cigar_units(read.alignment.cigar),
cigar,
msg='Cigar in original read was mutated.')
@parameterized.parameters([
# Window region literals are 1-based, but all other coordinates are
# 0-based: chr1:11-20 means start at 10 and end at 20 (exclusive).
dict(
window='chr1:11-20',
cigar='9M',
start=8,
read_length=9,
expected_cigar='7M',
expected_position=10,
expected_read_length=7,
comment='Trim first 2 bases'),
dict(
window='chr1:11-20',
cigar='9M',
start=13,
read_length=9,
expected_cigar='7M',
expected_position=13,
expected_read_length=7,
comment='Trim last 2 bases'),
dict(
window='chr1:11-20',
cigar='5M',
start=12,
read_length=5,
expected_cigar='5M',
expected_position=12,
expected_read_length=5,
comment='Read fits entirely inside window'),
dict(
window='chr1:11-20',
cigar='9M',
start=10,
read_length=9,
expected_cigar='9M',
expected_position=10,
expected_read_length=9,
comment='Read starts and ends at window edges'),
])
def test_trim_read(self, window, cigar, start, read_length, expected_cigar,
expected_position, expected_read_length, comment):
read = test_utils.make_read(
'A' * read_length, start=start, cigar=cigar, quals=[30] * read_length)
region = ranges.parse_literal(window)
output = realigner.trim_read(read, region)
self.assertEqual(
expected_cigar,
cigar_utils.format_cigar_units(output.alignment.cigar),
msg='Wrong cigar for case: {}'.format(comment))
# Start position of the alignment.
self.assertEqual(
output.alignment.position.position,
expected_position,
msg='Wrong position for case: {}'.format(comment))
# Read sequence.
self.assertLen(
output.aligned_sequence,
expected_read_length,
msg='Wrong length of aligned_sequence for case: {}'.format(comment))
# Base quality scores.
self.assertLen(
output.aligned_quality,
expected_read_length,
msg='Wrong length of aligned_quality for case: {}'.format(comment))
if __name__ == '__main__':
absltest.main()
|
|
import unittest
from numba import objmode
import numpy as np
from numba.core import ir, compiler
class TestIR(unittest.TestCase):
def test_IRScope(self):
filename = "<?>"
top = ir.Scope(parent=None, loc=ir.Loc(filename=filename, line=1))
local = ir.Scope(parent=top, loc=ir.Loc(filename=filename, line=2))
apple = local.define('apple', loc=ir.Loc(filename=filename, line=3))
self.assertIs(local.get('apple'), apple)
self.assertEqual(len(local.localvars), 1)
orange = top.define('orange', loc=ir.Loc(filename=filename, line=4))
self.assertEqual(len(local.localvars), 1)
self.assertEqual(len(top.localvars), 1)
self.assertIs(top.get('orange'), orange)
self.assertIs(local.get('orange'), orange)
more_orange = local.define('orange', loc=ir.Loc(filename=filename,
line=5))
self.assertIs(top.get('orange'), orange)
        self.assertIsNot(local.get('orange'), orange)
self.assertIs(local.get('orange'), more_orange)
try:
local.define('orange', loc=ir.Loc(filename=filename, line=5))
except ir.RedefinedError:
pass
else:
self.fail("Expecting an %s" % ir.RedefinedError)
class CheckEquality(unittest.TestCase):
var_a = ir.Var(None, 'a', ir.unknown_loc)
var_b = ir.Var(None, 'b', ir.unknown_loc)
var_c = ir.Var(None, 'c', ir.unknown_loc)
var_d = ir.Var(None, 'd', ir.unknown_loc)
var_e = ir.Var(None, 'e', ir.unknown_loc)
loc1 = ir.Loc('mock', 1, 0)
loc2 = ir.Loc('mock', 2, 0)
loc3 = ir.Loc('mock', 3, 0)
def check(self, base, same=[], different=[]):
for s in same:
self.assertTrue(base == s)
for d in different:
self.assertTrue(base != d)
class TestIRMeta(CheckEquality):
"""
Tests IR node meta, like Loc and Scope
"""
def test_loc(self):
a = ir.Loc('file', 1, 0)
b = ir.Loc('file', 1, 0)
c = ir.Loc('pile', 1, 0)
d = ir.Loc('file', 2, 0)
e = ir.Loc('file', 1, 1)
self.check(a, same=[b,], different=[c, d, e])
f = ir.Loc('file', 1, 0, maybe_decorator=False)
g = ir.Loc('file', 1, 0, maybe_decorator=True)
self.check(a, same=[f, g])
def test_scope(self):
parent1 = ir.Scope(None, self.loc1)
parent2 = ir.Scope(None, self.loc1)
parent3 = ir.Scope(None, self.loc2)
self.check(parent1, same=[parent2, parent3,])
a = ir.Scope(parent1, self.loc1)
b = ir.Scope(parent1, self.loc1)
c = ir.Scope(parent1, self.loc2)
d = ir.Scope(parent3, self.loc1)
self.check(a, same=[b, c, d])
# parent1 and parent2 are equal, so children referring to either parent
# should be equal
e = ir.Scope(parent2, self.loc1)
self.check(a, same=[e,])
class TestIRNodes(CheckEquality):
"""
Tests IR nodes
"""
def test_terminator(self):
        # Terminator base class instances should always compare equal
t1 = ir.Terminator()
t2 = ir.Terminator()
self.check(t1, same=[t2])
def test_jump(self):
a = ir.Jump(1, self.loc1)
b = ir.Jump(1, self.loc1)
c = ir.Jump(1, self.loc2)
d = ir.Jump(2, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_return(self):
a = ir.Return(self.var_a, self.loc1)
b = ir.Return(self.var_a, self.loc1)
c = ir.Return(self.var_a, self.loc2)
d = ir.Return(self.var_b, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_raise(self):
a = ir.Raise(self.var_a, self.loc1)
b = ir.Raise(self.var_a, self.loc1)
c = ir.Raise(self.var_a, self.loc2)
d = ir.Raise(self.var_b, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_staticraise(self):
a = ir.StaticRaise(AssertionError, None, self.loc1)
b = ir.StaticRaise(AssertionError, None, self.loc1)
c = ir.StaticRaise(AssertionError, None, self.loc2)
e = ir.StaticRaise(AssertionError, ("str",), self.loc1)
d = ir.StaticRaise(RuntimeError, None, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_branch(self):
a = ir.Branch(self.var_a, 1, 2, self.loc1)
b = ir.Branch(self.var_a, 1, 2, self.loc1)
c = ir.Branch(self.var_a, 1, 2, self.loc2)
d = ir.Branch(self.var_b, 1, 2, self.loc1)
e = ir.Branch(self.var_a, 2, 2, self.loc1)
f = ir.Branch(self.var_a, 1, 3, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_expr(self):
a = ir.Expr('some_op', self.loc1)
b = ir.Expr('some_op', self.loc1)
c = ir.Expr('some_op', self.loc2)
d = ir.Expr('some_other_op', self.loc1)
self.check(a, same=[b, c], different=[d])
def test_setitem(self):
a = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1)
b = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1)
c = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc2)
d = ir.SetItem(self.var_d, self.var_b, self.var_c, self.loc1)
e = ir.SetItem(self.var_a, self.var_d, self.var_c, self.loc1)
f = ir.SetItem(self.var_a, self.var_b, self.var_d, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_staticsetitem(self):
a = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1)
b = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1)
c = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc2)
d = ir.StaticSetItem(self.var_d, 1, self.var_b, self.var_c, self.loc1)
e = ir.StaticSetItem(self.var_a, 2, self.var_b, self.var_c, self.loc1)
f = ir.StaticSetItem(self.var_a, 1, self.var_d, self.var_c, self.loc1)
g = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_d, self.loc1)
self.check(a, same=[b, c], different=[d, e, f, g])
def test_delitem(self):
a = ir.DelItem(self.var_a, self.var_b, self.loc1)
b = ir.DelItem(self.var_a, self.var_b, self.loc1)
c = ir.DelItem(self.var_a, self.var_b, self.loc2)
d = ir.DelItem(self.var_c, self.var_b, self.loc1)
e = ir.DelItem(self.var_a, self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_del(self):
a = ir.Del(self.var_a.name, self.loc1)
b = ir.Del(self.var_a.name, self.loc1)
c = ir.Del(self.var_a.name, self.loc2)
d = ir.Del(self.var_b.name, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_setattr(self):
a = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1)
b = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1)
c = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc2)
d = ir.SetAttr(self.var_c, 'foo', self.var_b, self.loc1)
e = ir.SetAttr(self.var_a, 'bar', self.var_b, self.loc1)
f = ir.SetAttr(self.var_a, 'foo', self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_delattr(self):
a = ir.DelAttr(self.var_a, 'foo', self.loc1)
b = ir.DelAttr(self.var_a, 'foo', self.loc1)
c = ir.DelAttr(self.var_a, 'foo', self.loc2)
d = ir.DelAttr(self.var_c, 'foo', self.loc1)
e = ir.DelAttr(self.var_a, 'bar', self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_assign(self):
a = ir.Assign(self.var_a, self.var_b, self.loc1)
b = ir.Assign(self.var_a, self.var_b, self.loc1)
c = ir.Assign(self.var_a, self.var_b, self.loc2)
d = ir.Assign(self.var_c, self.var_b, self.loc1)
e = ir.Assign(self.var_a, self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_print(self):
a = ir.Print((self.var_a,), self.var_b, self.loc1)
b = ir.Print((self.var_a,), self.var_b, self.loc1)
c = ir.Print((self.var_a,), self.var_b, self.loc2)
d = ir.Print((self.var_c,), self.var_b, self.loc1)
e = ir.Print((self.var_a,), self.var_c, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_storemap(self):
a = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1)
b = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1)
c = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc2)
d = ir.StoreMap(self.var_d, self.var_b, self.var_c, self.loc1)
e = ir.StoreMap(self.var_a, self.var_d, self.var_c, self.loc1)
f = ir.StoreMap(self.var_a, self.var_b, self.var_d, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_yield(self):
a = ir.Yield(self.var_a, self.loc1, 0)
b = ir.Yield(self.var_a, self.loc1, 0)
c = ir.Yield(self.var_a, self.loc2, 0)
d = ir.Yield(self.var_b, self.loc1, 0)
e = ir.Yield(self.var_a, self.loc1, 1)
self.check(a, same=[b, c], different=[d, e])
def test_enterwith(self):
a = ir.EnterWith(self.var_a, 0, 1, self.loc1)
b = ir.EnterWith(self.var_a, 0, 1, self.loc1)
c = ir.EnterWith(self.var_a, 0, 1, self.loc2)
d = ir.EnterWith(self.var_b, 0, 1, self.loc1)
e = ir.EnterWith(self.var_a, 1, 1, self.loc1)
f = ir.EnterWith(self.var_a, 0, 2, self.loc1)
self.check(a, same=[b, c], different=[d, e, f])
def test_arg(self):
a = ir.Arg('foo', 0, self.loc1)
b = ir.Arg('foo', 0, self.loc1)
c = ir.Arg('foo', 0, self.loc2)
d = ir.Arg('bar', 0, self.loc1)
e = ir.Arg('foo', 1, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_const(self):
a = ir.Const(1, self.loc1)
b = ir.Const(1, self.loc1)
c = ir.Const(1, self.loc2)
d = ir.Const(2, self.loc1)
self.check(a, same=[b, c], different=[d])
def test_global(self):
a = ir.Global('foo', 0, self.loc1)
b = ir.Global('foo', 0, self.loc1)
c = ir.Global('foo', 0, self.loc2)
d = ir.Global('bar', 0, self.loc1)
e = ir.Global('foo', 1, self.loc1)
self.check(a, same=[b, c], different=[d, e])
def test_var(self):
a = ir.Var(None, 'foo', self.loc1)
b = ir.Var(None, 'foo', self.loc1)
c = ir.Var(None, 'foo', self.loc2)
d = ir.Var(ir.Scope(None, ir.unknown_loc), 'foo', self.loc1)
e = ir.Var(None, 'bar', self.loc1)
self.check(a, same=[b, c, d], different=[e])
def test_undefinedtype(self):
a = ir.UndefinedType()
b = ir.UndefinedType()
self.check(a, same=[b])
def test_loop(self):
a = ir.Loop(1, 3)
b = ir.Loop(1, 3)
c = ir.Loop(2, 3)
d = ir.Loop(1, 4)
self.check(a, same=[b], different=[c, d])
def test_with(self):
a = ir.With(1, 3)
b = ir.With(1, 3)
c = ir.With(2, 3)
d = ir.With(1, 4)
self.check(a, same=[b], different=[c, d])
# used later
_GLOBAL = 1234
class TestIRCompounds(CheckEquality):
"""
Tests IR concepts that have state
"""
def test_varmap(self):
a = ir.VarMap()
a.define(self.var_a, 'foo')
a.define(self.var_b, 'bar')
b = ir.VarMap()
b.define(self.var_a, 'foo')
b.define(self.var_b, 'bar')
c = ir.VarMap()
c.define(self.var_a, 'foo')
c.define(self.var_c, 'bar')
self.check(a, same=[b], different=[c])
def test_block(self):
def gen_block():
parent = ir.Scope(None, self.loc1)
tmp = ir.Block(parent, self.loc2)
assign1 = ir.Assign(self.var_a, self.var_b, self.loc3)
assign2 = ir.Assign(self.var_a, self.var_c, self.loc3)
assign3 = ir.Assign(self.var_c, self.var_b, self.loc3)
tmp.append(assign1)
tmp.append(assign2)
tmp.append(assign3)
return tmp
a = gen_block()
b = gen_block()
c = gen_block().append(ir.Assign(self.var_a, self.var_b, self.loc3))
self.check(a, same=[b], different=[c])
def test_functionir(self):
def run_frontend(x):
return compiler.run_frontend(x, emit_dels=True)
        # This creates a function full of all sorts of things to ensure the IR
        # is pretty involved. It then compares two instances of the compiled
        # function IR to check that the IR is the same regardless of the
        # underlying objects, and finally a tiny mutation is made to the IR of
        # the second function and detection of this change is checked.
def gen():
_FREEVAR = 0xCAFE
def foo(a, b, c=12, d=1j, e=None):
f = a + b
a += _FREEVAR
g = np.zeros(c, dtype=np.complex64)
h = f + g
i = 1j / d
if np.abs(i) > 0:
k = h / i
l = np.arange(1, c + 1)
with objmode():
print(e, k)
m = np.sqrt(l - g)
if np.abs(m[0]) < 1:
n = 0
for o in range(a):
n += 0
if np.abs(n) < 3:
break
n += m[2]
p = g / l
q = []
for r in range(len(p)):
q.append(p[r])
if r > 4 + 1:
with objmode(s='intp', t='complex128'):
s = 123
t = 5
if s > 122:
t += s
t += q[0] + _GLOBAL
return f + o + r + t + r + a + n
return foo
x = gen()
y = gen()
x_ir = run_frontend(x)
y_ir = run_frontend(y)
self.assertTrue(x_ir.equal_ir(y_ir))
def check_diffstr(string, pointing_at=[]):
lines = string.splitlines()
for item in pointing_at:
for l in lines:
if l.startswith('->'):
if item in l:
break
else:
raise AssertionError("Could not find %s " % item)
self.assertIn("IR is considered equivalent", x_ir.diff_str(y_ir))
# minor mutation, simply switch branch targets on last branch
for label in reversed(list(y_ir.blocks.keys())):
blk = y_ir.blocks[label]
if isinstance(blk.body[-1], ir.Branch):
ref = blk.body[-1]
ref.truebr, ref.falsebr = ref.falsebr, ref.truebr
break
check_diffstr(x_ir.diff_str(y_ir), ['branch'])
        self.assertFalse(x_ir.equal_ir(y_ir))
        z = gen()
        z_ir = run_frontend(z)
change_set = set()
for label in reversed(list(z_ir.blocks.keys())):
blk = z_ir.blocks[label]
ref = blk.body[:-1]
idx = None
for i in range(len(ref)):
# look for two adjacent Del
if (isinstance(ref[i], ir.Del) and
isinstance(ref[i + 1], ir.Del)):
idx = i
break
if idx is not None:
b = blk.body
change_set.add(str(b[idx + 1]))
change_set.add(str(b[idx]))
b[idx], b[idx + 1] = b[idx + 1], b[idx]
break
# ensure that a mutation occurred.
self.assertTrue(change_set)
self.assertFalse(x_ir.equal_ir(z_ir))
self.assertEqual(len(change_set), 2)
for item in change_set:
self.assertTrue(item.startswith('del '))
check_diffstr(x_ir.diff_str(z_ir), change_set)
def foo(a, b):
c = a * 2
d = c + b
e = np.sqrt(d)
return e
def bar(a, b): # same as foo
c = a * 2
d = c + b
e = np.sqrt(d)
return e
def baz(a, b):
c = a * 2
d = b + c
e = np.sqrt(d + 1)
return e
foo_ir = run_frontend(foo)
bar_ir = run_frontend(bar)
self.assertTrue(foo_ir.equal_ir(bar_ir))
self.assertIn("IR is considered equivalent", foo_ir.diff_str(bar_ir))
baz_ir = run_frontend(baz)
self.assertFalse(foo_ir.equal_ir(baz_ir))
tmp = foo_ir.diff_str(baz_ir)
self.assertIn("Other block contains more statements", tmp)
check_diffstr(tmp, ["c + b", "b + c"])
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# cardinal_pythonlib/argparse_func.py
"""
===============================================================================
Original code copyright (C) 2009-2021 Rudolf Cardinal ([email protected]).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Functions to help with argparse.**
"""
# noinspection PyProtectedMember
from argparse import (
_HelpAction,
_SubParsersAction,
ArgumentDefaultsHelpFormatter,
ArgumentParser,
ArgumentTypeError,
Namespace,
RawDescriptionHelpFormatter,
)
from typing import Any, Dict, List, Type
# =============================================================================
# Argparse actions
# =============================================================================
class ShowAllSubparserHelpAction(_HelpAction):
"""
Class to serve as the ``action`` for an ``argparse`` top-level parser that
shows help for all subparsers. As per
https://stackoverflow.com/questions/20094215/argparse-subparser-monolithic-help-output
""" # noqa
def __call__(self,
parser: ArgumentParser,
namespace: Namespace,
values: List[Any], # ?
option_string: str = None) -> None:
# 1. Print top-level help
parser.print_help()
sep = "=" * 79 # "-" less helpful when using grep for "--option"!
# 2. Print help for all subparsers
# noinspection PyProtectedMember
subparsers_actions = [
action for action in parser._actions
if isinstance(action, _SubParsersAction)
] # type: List[_SubParsersAction]
messages = [""] # type: List[str]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
messages.append(sep)
messages.append(f"Help for command '{choice}'")
messages.append(sep)
messages.append(subparser.format_help())
print("\n".join(messages))
parser.exit()
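# Illustrative sketch (not part of the original module): one way to wire
# ShowAllSubparserHelpAction into a parser so that a hypothetical "--allhelp"
# flag prints the top-level help followed by help for every subcommand. The
# flag and subcommand names here are assumptions made for the example.
def _example_parser_with_allhelp() -> ArgumentParser:
    parser = ArgumentParser(description="Demo of ShowAllSubparserHelpAction.")
    parser.add_argument(
        "--allhelp", action=ShowAllSubparserHelpAction,
        help="Show help for all subcommands and exit")
    subparsers = parser.add_subparsers(dest="command")
    subparsers.add_parser("fetch", help="Fetch something")
    subparsers.add_parser("push", help="Push something")
    return parser
# e.g. _example_parser_with_allhelp().parse_args(["--allhelp"]) prints all help.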
# =============================================================================
# Argparse formatters
# =============================================================================
class RawDescriptionArgumentDefaultsHelpFormatter(
ArgumentDefaultsHelpFormatter,
RawDescriptionHelpFormatter):
"""
Combines the features of
- :class:`RawDescriptionHelpFormatter` -- don't mangle the description
- :class:`ArgumentDefaultsHelpFormatter` -- print argument defaults
"""
pass
# =============================================================================
# Argparse types/checkers
# =============================================================================
def str2bool(v: str) -> bool:
"""
``argparse`` type that maps strings in case-insensitive fashion like this:
.. code-block:: none
argument strings value
------------------------------- -----
'yes', 'true', 't', 'y', '1' True
'no', 'false', 'f', 'n', '0' False
From
https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
Specimen usage:
.. code-block:: python
parser.add_argument(
"--nice", type=str2bool, nargs='?',
const=True, # if --nice is present with no parameter
default=NICE, # if the argument is entirely absent
help="Activate nice mode.")
""" # noqa
lv = v.lower()
if lv in ('yes', 'true', 't', 'y', '1'):
return True
elif lv in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise ArgumentTypeError('Boolean value expected.')
def positive_int(value: str) -> int:
"""
``argparse`` argument type that checks that its value is a positive
integer.
"""
try:
ivalue = int(value)
assert ivalue > 0
except (AssertionError, TypeError, ValueError):
raise ArgumentTypeError(
f"{value!r} is an invalid positive int")
return ivalue
def nonnegative_int(value: str) -> int:
"""
``argparse`` argument type that checks that its value is a non-negative
integer.
"""
try:
ivalue = int(value)
assert ivalue >= 0
except (AssertionError, TypeError, ValueError):
raise ArgumentTypeError(
f"{value!r} is an invalid non-negative int")
return ivalue
def percentage(value: str) -> float:
"""
``argparse`` argument type that checks that its value is a percentage (in
the sense of a float in the range [0, 100]).
"""
try:
fvalue = float(value)
assert 0 <= fvalue <= 100
except (AssertionError, TypeError, ValueError):
raise ArgumentTypeError(
f"{value!r} is an invalid percentage value")
return fvalue
class MapType(object):
"""
``argparse`` type maker that maps strings to a dictionary (map).
"""
def __init__(self,
map_separator: str = ":",
pair_separator: str = ",",
strip: bool = True,
from_type: Type = str,
to_type: Type = str) -> None:
"""
Args:
map_separator:
string that separates the "from" and "to" members of a pair
pair_separator:
string that separates different pairs
strip:
strip whitespace after splitting?
from_type:
type to coerce "from" values to; e.g. ``str``, ``int``
to_type:
type to coerce "to" values to; e.g. ``str``, ``int``
"""
self.map_separator = map_separator
self.pair_separator = pair_separator
self.strip = strip
self.from_type = from_type
self.to_type = to_type
def __call__(self, value: str) -> Dict:
result = {}
pairs = value.split(self.pair_separator)
for pair in pairs:
from_str, to_str = pair.split(self.map_separator)
if self.strip:
from_str = from_str.strip()
to_str = to_str.strip()
try:
from_val = self.from_type(from_str)
except (TypeError, ValueError):
raise ArgumentTypeError(
f"{from_str!r} cannot be converted to type "
f"{self.from_type!r}")
try:
to_val = self.to_type(to_str)
except (TypeError, ValueError):
raise ArgumentTypeError(
f"{to_str!r} cannot be converted to type "
f"{self.to_type!r}")
result[from_val] = to_val
return result
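# Illustrative sketch (not part of the original module): how these argparse
# types and the formatter above might be combined. The option names, defaults,
# and separators below are assumptions made for the example only.
def _example_build_parser() -> ArgumentParser:
    parser = ArgumentParser(
        description="Demo of cardinal_pythonlib argparse helpers.",
        formatter_class=RawDescriptionArgumentDefaultsHelpFormatter)
    parser.add_argument("--nice", type=str2bool, nargs='?',
                        const=True, default=False,
                        help="Activate nice mode")
    parser.add_argument("--workers", type=positive_int, default=4,
                        help="Number of worker processes")
    parser.add_argument("--threshold", type=percentage, default=50.0,
                        help="Percentage threshold in [0, 100]")
    parser.add_argument("--renames", type=MapType(), default={},
                        help="Mapping such as 'old1:new1,old2:new2'")
    return parser
# e.g. _example_build_parser().parse_args(["--renames", "a:b,c:d"]).renames
# evaluates to {'a': 'b', 'c': 'd'}.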
|
|
from PyQt4 import QtGui, QtCore#, Qsci
#from diagramscene import DiagramItem, DiagramTextItem
from FsmState import FsmState
from FsmStateAction import FsmStateAction
from FsmTransition import FsmTransition
class FsmScene(QtGui.QGraphicsScene):
InsertState, InsertStateAction, InsertLine, InsertText, MoveItem = range(5)
#itemInserted = QtCore.pyqtSignal(DiagramItem)
textInserted = QtCore.pyqtSignal(QtGui.QGraphicsTextItem)
itemSelected = QtCore.pyqtSignal(QtGui.QGraphicsItem)
mouseMoved = QtCore.pyqtSignal(QtCore.QPointF)
def __init__(self, itemMenu, parent=None):
super(FsmScene, self).__init__(parent)
self.gridSize = 50
self.stateCreatedIdx = 0
self.myItemMenu = itemMenu
self.myMode = self.MoveItem
self.line = None
self.textItem = None
self.myItemColor = QtCore.Qt.white
self.myTextColor = QtCore.Qt.black
self.myLineColor = QtCore.Qt.black
self.myFont = QtGui.QFont()
def saveDocument(self, fileOut):
for i in self.items():
fileOut.write(i.toStore())
# if isinstance(i,FsmState):
# fileOut.write(i.toStore())
# elif isinstance(i,FsmTransition):
# fileOut.write("FsmTransition\n")
# Efficiently draws a grid in the background.
# For more information: http://www.qtcentre.org/threads/5609-Drawing-grids-efficiently-in-QGraphicsScene?p=28905#post28905
def drawBackground(self, painter, rect):
left = int(rect.left()) - (int(rect.left()) % self.gridSize)
right = int(rect.right())
top = int(rect.top()) - (int(rect.top()) % self.gridSize)
bottom = int(rect.bottom())
        lines = []
        for x in range(left, right, self.gridSize):
            lines.append(QtCore.QLineF(x, rect.top(), x, rect.bottom()))
        for y in range(top, bottom, self.gridSize):
            lines.append(QtCore.QLineF(rect.left(), y, rect.right(), y))
        painter.setRenderHint(QtGui.QPainter.Antialiasing, False)
        painter.setPen(QtGui.QPen(QtCore.Qt.gray, 0))
        painter.drawLines(lines)
# def setLineColor(self, color):
# self.myLineColor = color
# if self.isItemChange(FsmTransition):
# item = self.selectedItems()[0]
# item.setColor(self.myLineColor)
# self.update()
# def setTextColor(self, color):
# self.myTextColor = color
# if self.isItemChange(DiagramTextItem):
# item = self.selectedItems()[0]
# item.setDefaultTextColor(self.myTextColor)
# def setItemColor(self, color):
# self.myItemColor = color
# if self.isItemChange(DiagramItem):
# item = self.selectedItems()[0]
# item.setBrush(self.myItemColor)
# def setFont(self, font):
# self.myFont = font
# if self.isItemChange(DiagramTextItem):
# item = self.selectedItems()[0]
# item.setFont(self.myFont)
def setMode(self, mode):
self.myMode = mode
def editorLostFocus(self, item):
cursor = item.textCursor()
cursor.clearSelection()
item.setTextCursor(cursor)
        # Remove the text item if it was left empty when editing finished.
        if not item.toPlainText():
            self.removeItem(item)
            item.deleteLater()
def mousePressEvent(self, mouseEvent):
pos = mouseEvent.scenePos().toPoint() / self.gridSize * self.gridSize
stateAtPos = self.items(mouseEvent.scenePos())
while len(stateAtPos) and not isinstance(stateAtPos[0], FsmState):
stateAtPos.pop(0)
if len(stateAtPos):
stateAtPos = stateAtPos[0]
else:
stateAtPos = None
if (mouseEvent.button() != QtCore.Qt.LeftButton):
return
if self.myMode == self.InsertState:
item = FsmState(self.myItemMenu, 'S{}'.format(self.stateCreatedIdx))
self.stateCreatedIdx += 1
#item.setBrush(self.myItemColor)
self.addItem(item)
item.setPos(pos)
#self.itemInserted.emit(item)
# elif self.myMode == self.InsertItem:
# item = DiagramItem(self.myItemType, self.myItemMenu)
# item.setBrush(self.myItemColor)
# self.addItem(item)
# item.setPos(mouseEvent.scenePos())
# self.itemInserted.emit(item)
elif self.myMode == self.InsertStateAction:
if stateAtPos:
item = FsmStateAction("--type something", parent=stateAtPos)
# editor = Qsci.QsciScintilla()
# lexer = Qsci.QsciLexerVHDL()
# api = Qsci.QsciAPIs(lexer)
# api.add("then")
# api.prepare()
# editor.setLexer(lexer)
# editor.setAutoCompletionThreshold(2)
# editor.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs)
# editor.setText("--Type some VHDL here\nif youcan then\nvery <= good;\n\endif")
# item = self.addWidget(editor)
# item.setPos(pos)
elif self.myMode == self.InsertLine:
#self.line = QtGui.QGraphicsLineItem(QtCore.QLineF(mouseEvent.scenePos(),
# mouseEvent.scenePos()))
startItems = self.items(mouseEvent.scenePos())
while len(startItems) and not isinstance(startItems[0], FsmState):
startItems.pop(0)
if len(startItems) and \
isinstance(startItems[0], FsmState):
if not self.line:
startItem = startItems[0]
self.line = FsmTransition(startItem, None)
self.line.setPen(QtGui.QPen(self.myLineColor, 0))
self.addItem(self.line)
elif self.line:
self.line.addIntermediatePoint(mouseEvent.scenePos())
elif self.myMode == self.InsertText:
            # NOTE: DiagramTextItem requires the diagramscene import that is
            # commented out at the top of this file.
            textItem = DiagramTextItem()
textItem.setFont(self.myFont)
textItem.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
textItem.setZValue(1000.0)
textItem.lostFocus.connect(self.editorLostFocus)
textItem.selectedChange.connect(self.itemSelected)
self.addItem(textItem)
textItem.setDefaultTextColor(self.myTextColor)
textItem.setPos(pos)
self.textInserted.emit(textItem)
super(FsmScene, self).mousePressEvent(mouseEvent)
def mouseMoveEvent(self, mouseEvent):
self.mouseMoved.emit(mouseEvent.scenePos())
if self.myMode == self.InsertLine and self.line:
self.line.popIntermediatePoint()
self.line.addIntermediatePoint(mouseEvent.scenePos())
#newLine = QtCore.QLineF(self.line.line().p1(), mouseEvent.scenePos())
#self.line.setLine(newLine)
else: #if self.myMode == self.MoveItem:
super(FsmScene, self).mouseMoveEvent(mouseEvent)
def mouseReleaseEvent(self, mouseEvent):
if self.line and self.myMode == self.InsertLine:
# startItems = self.items(self.line.line().p1())
# if len(startItems) and startItems[0] == self.line:
# startItems.pop(0)
endItems = self.items(mouseEvent.scenePos())
while len(endItems) and not isinstance(endItems[0], FsmState):
endItems.pop(0)
if len(endItems) and \
isinstance(endItems[0], FsmState):
                # If endItems[0] is equal to self.line.startItem() (loop back),
                # we should check that there is at least one intermediate point.
if (len(self.line.intermediatePoints) and \
self.line.startItem() == endItems[0]) or \
self.line.startItem() != endItems[0]:
self.line.popIntermediatePoint()
self.line.addEndItem(endItems[0])
self.line.startItem().addOutboundTransition(self.line)
endItems[0].addInboundTransition(self.line)
self.line.setZValue(-1000.0)
self.line = None
else:
self.line.popIntermediatePoint()
self.line.addIntermediatePoint(mouseEvent.scenePos())
# if len(startItems) and len(endItems) and \
# isinstance(startItems[0], FsmState) and \
# isinstance(endItems[0], FsmState) and \
# startItems[0] != endItems[0]:
# startItem = startItems[0]
# endItem = endItems[0]
# arrow = FsmTransition(startItem, endItem)
# arrow.setColor(self.myLineColor)
# startItem.addOutboundTransition(arrow)
# endItem.addInboundTransition(arrow)
# arrow.setZValue(-1000.0)
# self.addItem(arrow)
# arrow.updatePosition()
#self.line = None
super(FsmScene, self).mouseReleaseEvent(mouseEvent)
#we should realign all selected States to grid
#if self.myMode == self.MoveItem:
for el in self.selectedItems():
if isinstance(el, FsmState):
pos = el.scenePos().toPoint() / self.gridSize * self.gridSize
el.setPos(pos)
def keyPressEvent(self, keyEvent):
if self.line:
if keyEvent.key()==QtCore.Qt.Key_Escape:
self.removeItem(self.line)
self.line = None
elif keyEvent.key()==QtCore.Qt.Key_Backspace and \
self.line.getNbOfIntermediatePoints() > 1:
self.line.popIntermediatePoint()
return super(FsmScene, self).keyPressEvent(keyEvent)
def isItemChange(self, type):
for item in self.selectedItems():
if isinstance(item, type):
return True
return False
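# Illustrative sketch (not part of the original class): the grid snapping used
# in mousePressEvent and mouseReleaseEvent relies on QPoint integer arithmetic,
# which rounds each coordinate to a multiple of the grid size.
def _snap_to_grid(scene_pos, grid_size):
    point = scene_pos.toPoint()  # QPointF -> QPoint
    return point / grid_size * grid_size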
if __name__ == '__main__':
import sys
from MainWindow import MainWindow
from PyQt4.QtTest import QTest
from PyQt4.QtCore import Qt
app = QtGui.QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.setGeometry(100, 100, 800, 500)
mainWindow.show()
QTest.mouseClick(mainWindow.addStateButton, Qt.LeftButton)
QTest.mouseClick(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(400,200))
QTest.mouseClick(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(100,250))
QTest.mouseClick(mainWindow.linePointerButton, Qt.LeftButton)
# QTest.mousePress(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(400,200))
# QTest.mouseMove(mainWindow.view.viewport(), QtCore.QPoint(100,250))
# QTest.mouseRelease(mainWindow.view.viewport(), Qt.LeftButton, Qt.NoModifier, QtCore.QPoint(100,250))
sys.exit(app.exec_())
|
|
"""Base Command class, and related routines"""
from __future__ import absolute_import
import logging
import logging.config
import optparse
import os
import sys
import warnings
from pip._internal import cmdoptions
from pip._internal.baseparser import (
ConfigOptionParser, UpdatingDefaultsHelpFormatter,
)
from pip._internal.compat import WINDOWS
from pip._internal.download import PipSession
from pip._internal.exceptions import (
BadCommand, CommandError, InstallationError, PreviousBuildDirError,
UninstallationError,
)
from pip._internal.index import PackageFinder
from pip._internal.locations import running_under_virtualenv
from pip._internal.req.req_file import parse_requirements
from pip._internal.req.req_install import InstallRequirement
from pip._internal.status_codes import (
ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, UNKNOWN_ERROR,
VIRTUALENV_NOT_FOUND,
)
from pip._internal.utils import deprecation
from pip._internal.utils.logging import IndentingFormatter
from pip._internal.utils.misc import get_prog, normalize_path
from pip._internal.utils.outdated import pip_version_check
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional
__all__ = ['Command']
logger = logging.getLogger(__name__)
class Command(object):
name = None # type: Optional[str]
usage = None # type: Optional[str]
hidden = False # type: bool
ignore_require_venv = False # type: bool
log_streams = ("ext://sys.stdout", "ext://sys.stderr")
def __init__(self, isolated=False):
parser_kw = {
'usage': self.usage,
'prog': '%s %s' % (get_prog(), self.name),
'formatter': UpdatingDefaultsHelpFormatter(),
'add_help_option': False,
'name': self.name,
'description': self.__doc__,
'isolated': isolated,
}
self.parser = ConfigOptionParser(**parser_kw)
# Commands should add options to this option group
optgroup_name = '%s Options' % self.name.capitalize()
self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
# Add the general options
gen_opts = cmdoptions.make_option_group(
cmdoptions.general_group,
self.parser,
)
self.parser.add_option_group(gen_opts)
def _build_session(self, options, retries=None, timeout=None):
session = PipSession(
cache=(
normalize_path(os.path.join(options.cache_dir, "http"))
if options.cache_dir else None
),
retries=retries if retries is not None else options.retries,
insecure_hosts=options.trusted_hosts,
)
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle SSL client certificate
if options.client_cert:
session.cert = options.client_cert
# Handle timeouts
if options.timeout or timeout:
session.timeout = (
timeout if timeout is not None else options.timeout
)
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
def parse_args(self, args):
# factored out for testability
return self.parser.parse_args(args)
def main(self, args):
options, args = self.parse_args(args)
# Set verbosity so that it can be used elsewhere.
self.verbosity = options.verbose - options.quiet
if self.verbosity >= 1:
level = "DEBUG"
elif self.verbosity == -1:
level = "WARNING"
elif self.verbosity == -2:
level = "ERROR"
elif self.verbosity <= -3:
level = "CRITICAL"
else:
level = "INFO"
# The root logger should match the "console" level *unless* we
# specified "--log" to send debug logs to a file.
root_level = level
if options.log:
root_level = "DEBUG"
logger_class = "pip._internal.utils.logging.ColorizedStreamHandler"
handler_class = "pip._internal.utils.logging.BetterRotatingFileHandler"
logging.config.dictConfig({
"version": 1,
"disable_existing_loggers": False,
"filters": {
"exclude_warnings": {
"()": "pip._internal.utils.logging.MaxLevelFilter",
"level": logging.WARNING,
},
},
"formatters": {
"indent": {
"()": IndentingFormatter,
"format": "%(message)s",
},
},
"handlers": {
"console": {
"level": level,
"class": logger_class,
"no_color": options.no_color,
"stream": self.log_streams[0],
"filters": ["exclude_warnings"],
"formatter": "indent",
},
"console_errors": {
"level": "WARNING",
"class": logger_class,
"no_color": options.no_color,
"stream": self.log_streams[1],
"formatter": "indent",
},
"user_log": {
"level": "DEBUG",
"class": handler_class,
"filename": options.log or "/dev/null",
"delay": True,
"formatter": "indent",
},
},
"root": {
"level": root_level,
"handlers": list(filter(None, [
"console",
"console_errors",
"user_log" if options.log else None,
])),
},
# Disable any logging besides WARNING unless we have DEBUG level
# logging enabled. These use both pip._vendor and the bare names
# for the case where someone unbundles our libraries.
"loggers": {
name: {
"level": (
"WARNING" if level in ["INFO", "ERROR"] else "DEBUG"
)
} for name in [
"pip._vendor", "distlib", "requests", "urllib3"
]
},
})
if sys.version_info[:2] == (3, 3):
warnings.warn(
"Python 3.3 supported has been deprecated and support for it "
"will be dropped in the future. Please upgrade your Python.",
deprecation.RemovedInPip11Warning,
)
# TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
if options.no_input:
os.environ['PIP_NO_INPUT'] = '1'
if options.exists_action:
os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
if options.require_venv and not self.ignore_require_venv:
# If a venv is required check if it can really be found
if not running_under_virtualenv():
logger.critical(
'Could not find an activated virtualenv (required).'
)
sys.exit(VIRTUALENV_NOT_FOUND)
original_root_handlers = set(logging.root.handlers)
try:
status = self.run(options, args)
# FIXME: all commands should return an exit status
# and when it is done, isinstance is not needed anymore
if isinstance(status, int):
return status
except PreviousBuildDirError as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return PREVIOUS_BUILD_DIR_ERROR
except (InstallationError, UninstallationError, BadCommand) as exc:
logger.critical(str(exc))
logger.debug('Exception information:', exc_info=True)
return ERROR
except CommandError as exc:
logger.critical('ERROR: %s', exc)
logger.debug('Exception information:', exc_info=True)
return ERROR
except KeyboardInterrupt:
logger.critical('Operation cancelled by user')
logger.debug('Exception information:', exc_info=True)
return ERROR
        except BaseException:
logger.critical('Exception:', exc_info=True)
return UNKNOWN_ERROR
finally:
# Check if we're using the latest version of pip available
if (not options.disable_pip_version_check and not
getattr(options, "no_index", False)):
with self._build_session(
options,
retries=0,
timeout=min(5, options.timeout)) as session:
pip_version_check(session, options)
# Avoid leaking loggers
for handler in set(logging.root.handlers) - original_root_handlers:
                # this method benefits from the Logger class's internal lock
logging.root.removeHandler(handler)
return SUCCESS
class RequirementCommand(Command):
@staticmethod
def populate_requirement_set(requirement_set, args, options, finder,
session, name, wheel_cache):
"""
Marshal cmd line args into a requirement set.
"""
# NOTE: As a side-effect, options.require_hashes and
# requirement_set.require_hashes may be updated
for filename in options.constraints:
for req_to_add in parse_requirements(
filename,
constraint=True, finder=finder, options=options,
session=session, wheel_cache=wheel_cache):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in args:
req_to_add = InstallRequirement.from_line(
req, None, isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for req in options.editables:
req_to_add = InstallRequirement.from_editable(
req,
isolated=options.isolated_mode,
wheel_cache=wheel_cache
)
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
for filename in options.requirements:
for req_to_add in parse_requirements(
filename,
finder=finder, options=options, session=session,
wheel_cache=wheel_cache):
req_to_add.is_direct = True
requirement_set.add_requirement(req_to_add)
# If --require-hashes was a line in a requirements file, tell
# RequirementSet about it:
requirement_set.require_hashes = options.require_hashes
if not (args or options.editables or options.requirements):
opts = {'name': name}
if options.find_links:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(maybe you meant "pip %(name)s %(links)s"?)' %
dict(opts, links=' '.join(options.find_links)))
else:
raise CommandError(
'You must give at least one requirement to %(name)s '
'(see "pip help %(name)s")' % opts)
# On Windows, any operation modifying pip should be run as:
# python -m pip ...
# See https://github.com/pypa/pip/issues/1299 for more discussion
should_show_use_python_msg = (
WINDOWS and
requirement_set.has_requirement('pip') and
"pip" in os.path.basename(sys.argv[0])
)
if should_show_use_python_msg:
new_command = [
sys.executable, "-m", "pip"
] + sys.argv[1:]
raise CommandError(
'To modify pip, please run the following command:\n{}'
.format(" ".join(new_command))
)
def _build_package_finder(self, options, session,
platform=None, python_versions=None,
abi=None, implementation=None):
"""
Create a package finder appropriate to this requirement command.
"""
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.debug('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
return PackageFinder(
find_links=options.find_links,
format_control=options.format_control,
index_urls=index_urls,
trusted_hosts=options.trusted_hosts,
allow_all_prereleases=options.pre,
process_dependency_links=options.process_dependency_links,
session=session,
platform=platform,
versions=python_versions,
abi=abi,
implementation=implementation,
)
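# Illustrative sketch (not part of pip): a minimal, hypothetical Command
# subclass showing the pieces Command.main() relies on -- a name, a usage
# string, options registered on cmd_opts, and a run() method returning one of
# the status codes imported above.
class _ExampleEchoCommand(Command):
    """Print the given arguments. For illustration only."""
    name = "echo"
    usage = "%prog [options] <word> ..."
    summary = "Echo the given arguments (example command)."
    def __init__(self, *args, **kw):
        super(_ExampleEchoCommand, self).__init__(*args, **kw)
        self.cmd_opts.add_option(
            '--upper', dest='upper', action='store_true', default=False,
            help='Upper-case the output.')
        self.parser.add_option_group(self.cmd_opts)
    def run(self, options, args):
        text = ' '.join(args)
        logger.info(text.upper() if options.upper else text)
        return SUCCESS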
|
|
"""Gaussian processes regression."""
# Authors: Jan Hendrik Metzen <[email protected]>
# Modified by: Pete Green <[email protected]>
# License: BSD 3 clause
import warnings
from operator import itemgetter
import numpy as np
from scipy.linalg import cholesky, cho_solve, solve_triangular
import scipy.optimize
from ..base import BaseEstimator, RegressorMixin, clone
from ..base import MultiOutputMixin
from .kernels import RBF, ConstantKernel as C
from ..preprocessing._data import _handle_zeros_in_scale
from ..utils import check_random_state
from ..utils.optimize import _check_optimize_result
GPR_CHOLESKY_LOWER = True
class GaussianProcessRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
"""Gaussian process regression (GPR).
The implementation is based on Algorithm 2.1 of [1]_.
In addition to standard scikit-learn estimator API,
:class:`GaussianProcessRegressor`:
* allows prediction without prior fitting (based on the GP prior)
* provides an additional method `sample_y(X)`, which evaluates samples
drawn from the GPR (prior or posterior) at given inputs
* exposes a method `log_marginal_likelihood(theta)`, which can be used
externally for other ways of selecting hyperparameters, e.g., via
Markov chain Monte Carlo.
Read more in the :ref:`User Guide <gaussian_process>`.
.. versionadded:: 0.18
Parameters
----------
kernel : kernel instance, default=None
The kernel specifying the covariance function of the GP. If None is
        passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed")
* RBF(1.0, length_scale_bounds="fixed")`` is used as default. Note that
the kernel hyperparameters are optimized during fitting unless the
bounds are marked as "fixed".
alpha : float or ndarray of shape (n_samples,), default=1e-10
Value added to the diagonal of the kernel matrix during fitting.
This can prevent a potential numerical issue during fitting, by
ensuring that the calculated values form a positive definite matrix.
It can also be interpreted as the variance of additional Gaussian
measurement noise on the training observations. Note that this is
different from using a `WhiteKernel`. If an array is passed, it must
have the same number of entries as the data used for fitting and is
        used as datapoint-dependent noise level. Allowing the noise level to be
        specified directly as a parameter is mainly for convenience and for
        consistency with :class:`~sklearn.linear_model.Ridge`.
optimizer : "fmin_l_bfgs_b" or callable, default="fmin_l_bfgs_b"
Can either be one of the internally supported optimizers for optimizing
the kernel's parameters, specified by a string, or an externally
defined optimizer passed as a callable. If a callable is passed, it
must have the signature::
def optimizer(obj_func, initial_theta, bounds):
# * 'obj_func': the objective function to be minimized, which
# takes the hyperparameters theta as a parameter and an
# optional flag eval_gradient, which determines if the
# gradient is returned additionally to the function value
# * 'initial_theta': the initial value for theta, which can be
# used by local optimizers
# * 'bounds': the bounds on the values of theta
....
# Returned are the best found hyperparameters theta and
# the corresponding value of the target function.
return theta_opt, func_min
Per default, the L-BFGS-B algorithm from `scipy.optimize.minimize`
is used. If None is passed, the kernel's parameters are kept fixed.
Available internal optimizers are: `{'fmin_l_bfgs_b'}`.
n_restarts_optimizer : int, default=0
The number of restarts of the optimizer for finding the kernel's
parameters which maximize the log-marginal likelihood. The first run
of the optimizer is performed from the kernel's initial parameters,
the remaining ones (if any) from thetas sampled log-uniform randomly
from the space of allowed theta-values. If greater than 0, all bounds
must be finite. Note that `n_restarts_optimizer == 0` implies that one
run is performed.
normalize_y : bool, default=False
Whether or not to normalize the target values `y` by removing the mean
and scaling to unit-variance. This is recommended for cases where
zero-mean, unit-variance priors are used. Note that, in this
implementation, the normalisation is reversed before the GP predictions
are reported.
.. versionchanged:: 0.23
copy_X_train : bool, default=True
If True, a persistent copy of the training data is stored in the
object. Otherwise, just a reference to the training data is stored,
which might cause predictions to change if the data is modified
externally.
random_state : int, RandomState instance or None, default=None
Determines random number generation used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
X_train_ : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data (also
required for prediction).
y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values in training data (also required for prediction).
kernel_ : kernel instance
The kernel used for prediction. The structure of the kernel is the
same as the one passed as parameter but with optimized hyperparameters.
L_ : array-like of shape (n_samples, n_samples)
Lower-triangular Cholesky decomposition of the kernel in ``X_train_``.
alpha_ : array-like of shape (n_samples,)
Dual coefficients of training data points in kernel space.
log_marginal_likelihood_value_ : float
The log-marginal-likelihood of ``self.kernel_.theta``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
GaussianProcessClassifier : Gaussian process classification (GPC)
based on Laplace approximation.
References
----------
.. [1] `Rasmussen, Carl Edward.
"Gaussian processes in machine learning."
Summer school on machine learning. Springer, Berlin, Heidelberg, 2003
<http://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_.
Examples
--------
>>> from sklearn.datasets import make_friedman2
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
>>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0)
>>> kernel = DotProduct() + WhiteKernel()
>>> gpr = GaussianProcessRegressor(kernel=kernel,
... random_state=0).fit(X, y)
>>> gpr.score(X, y)
0.3680...
>>> gpr.predict(X[:2,:], return_std=True)
(array([653.0..., 592.1...]), array([316.6..., 316.6...]))
"""
def __init__(
self,
kernel=None,
*,
alpha=1e-10,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=0,
normalize_y=False,
copy_X_train=True,
random_state=None,
):
self.kernel = kernel
self.alpha = alpha
self.optimizer = optimizer
self.n_restarts_optimizer = n_restarts_optimizer
self.normalize_y = normalize_y
self.copy_X_train = copy_X_train
self.random_state = random_state
def fit(self, X, y):
"""Fit Gaussian process regression model.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Feature vectors or other representations of training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
GaussianProcessRegressor class instance.
"""
if self.kernel is None: # Use an RBF kernel as default
self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
self.kernel_ = clone(self.kernel)
self._rng = check_random_state(self.random_state)
if self.kernel_.requires_vector_input:
dtype, ensure_2d = "numeric", True
else:
dtype, ensure_2d = None, False
X, y = self._validate_data(
X,
y,
multi_output=True,
y_numeric=True,
ensure_2d=ensure_2d,
dtype=dtype,
)
# Normalize target value
if self.normalize_y:
self._y_train_mean = np.mean(y, axis=0)
self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False)
# Remove mean and make unit variance
y = (y - self._y_train_mean) / self._y_train_std
else:
self._y_train_mean = np.zeros(1)
self._y_train_std = 1
if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]:
if self.alpha.shape[0] == 1:
self.alpha = self.alpha[0]
else:
raise ValueError(
"alpha must be a scalar or an array with same number of "
f"entries as y. ({self.alpha.shape[0]} != {y.shape[0]})"
)
self.X_train_ = np.copy(X) if self.copy_X_train else X
self.y_train_ = np.copy(y) if self.copy_X_train else y
if self.optimizer is not None and self.kernel_.n_dims > 0:
# Choose hyperparameters based on maximizing the log-marginal
# likelihood (potentially starting from several initial values)
def obj_func(theta, eval_gradient=True):
if eval_gradient:
lml, grad = self.log_marginal_likelihood(
theta, eval_gradient=True, clone_kernel=False
)
return -lml, -grad
else:
return -self.log_marginal_likelihood(theta, clone_kernel=False)
# First optimize starting from theta specified in kernel
optima = [
(
self._constrained_optimization(
obj_func, self.kernel_.theta, self.kernel_.bounds
)
)
]
# Additional runs are performed from log-uniform chosen initial
# theta
if self.n_restarts_optimizer > 0:
if not np.isfinite(self.kernel_.bounds).all():
raise ValueError(
"Multiple optimizer restarts (n_restarts_optimizer>0) "
"requires that all bounds are finite."
)
bounds = self.kernel_.bounds
for iteration in range(self.n_restarts_optimizer):
theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1])
optima.append(
self._constrained_optimization(obj_func, theta_initial, bounds)
)
# Select result from run with minimal (negative) log-marginal
# likelihood
lml_values = list(map(itemgetter(1), optima))
self.kernel_.theta = optima[np.argmin(lml_values)][0]
self.kernel_._check_bounds_params()
self.log_marginal_likelihood_value_ = -np.min(lml_values)
else:
self.log_marginal_likelihood_value_ = self.log_marginal_likelihood(
self.kernel_.theta, clone_kernel=False
)
# Precompute quantities required for predictions which are independent
# of actual query points
# Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
K = self.kernel_(self.X_train_)
K[np.diag_indices_from(K)] += self.alpha
try:
self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
except np.linalg.LinAlgError as exc:
exc.args = (
f"The kernel, {self.kernel_}, is not returning a positive "
"definite matrix. Try gradually increasing the 'alpha' "
"parameter of your GaussianProcessRegressor estimator.",
) + exc.args
raise
# Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
self.alpha_ = cho_solve(
(self.L_, GPR_CHOLESKY_LOWER),
self.y_train_,
check_finite=False,
)
return self
def predict(self, X, return_std=False, return_cov=False):
"""Predict using the Gaussian process regression model.
We can also predict based on an unfitted model by using the GP prior.
In addition to the mean of the predictive distribution, optionally also
returns its standard deviation (`return_std=True`) or covariance
(`return_cov=True`). Note that at most one of the two can be requested.
Parameters
----------
X : array-like of shape (n_samples, n_features) or list of object
Query points where the GP is evaluated.
return_std : bool, default=False
If True, the standard-deviation of the predictive distribution at
the query points is returned along with the mean.
return_cov : bool, default=False
If True, the covariance of the joint predictive distribution at
the query points is returned along with the mean.
Returns
-------
y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets)
Mean of predictive distribution at query points.
y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional
Standard deviation of predictive distribution at query points.
Only returned when `return_std` is True.
y_cov : ndarray of shape (n_samples, n_samples) or \
(n_samples, n_samples, n_targets), optional
Covariance of joint predictive distribution at query points.
Only returned when `return_cov` is True.
"""
if return_std and return_cov:
raise RuntimeError(
"At most one of return_std or return_cov can be requested."
)
if self.kernel is None or self.kernel.requires_vector_input:
dtype, ensure_2d = "numeric", True
else:
dtype, ensure_2d = None, False
X = self._validate_data(X, ensure_2d=ensure_2d, dtype=dtype, reset=False)
if not hasattr(self, "X_train_"):  # Unfitted; predict based on GP prior
if self.kernel is None:
kernel = C(1.0, constant_value_bounds="fixed") * RBF(
1.0, length_scale_bounds="fixed"
)
else:
kernel = self.kernel
y_mean = np.zeros(X.shape[0])
if return_cov:
y_cov = kernel(X)
return y_mean, y_cov
elif return_std:
y_var = kernel.diag(X)
return y_mean, np.sqrt(y_var)
else:
return y_mean
else: # Predict based on GP posterior
# Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . alpha
K_trans = self.kernel_(X, self.X_train_)
y_mean = K_trans @ self.alpha_
# undo normalisation
y_mean = self._y_train_std * y_mean + self._y_train_mean
# Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T
V = solve_triangular(
self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False
)
if return_cov:
# Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v
y_cov = self.kernel_(X) - V.T @ V
# undo normalisation
y_cov = np.outer(y_cov, self._y_train_std ** 2).reshape(
*y_cov.shape, -1
)
# if y_cov has shape (n_samples, n_samples, 1), reshape to
# (n_samples, n_samples)
if y_cov.shape[2] == 1:
y_cov = np.squeeze(y_cov, axis=2)
return y_mean, y_cov
elif return_std:
# Compute variance of predictive distribution
# Use einsum to avoid explicitly forming the large matrix
# V^T @ V just to extract its diagonal afterward.
y_var = self.kernel_.diag(X)
y_var -= np.einsum("ij,ji->i", V.T, V)
# Check if any of the variances is negative because of
# numerical issues. If yes: set the variance to 0.
y_var_negative = y_var < 0
if np.any(y_var_negative):
warnings.warn(
"Predicted variances smaller than 0. "
"Setting those variances to 0."
)
y_var[y_var_negative] = 0.0
# undo normalisation
y_var = np.outer(y_var, self._y_train_std ** 2).reshape(
*y_var.shape, -1
)
# if y_var has shape (n_samples, 1), reshape to (n_samples,)
if y_var.shape[1] == 1:
y_var = np.squeeze(y_var, axis=1)
return y_mean, np.sqrt(y_var)
else:
return y_mean
def sample_y(self, X, n_samples=1, random_state=0):
"""Draw samples from Gaussian process and evaluate at X.
Parameters
----------
X : array-like of shape (n_samples_X, n_features) or list of object
Query points where the GP is evaluated.
n_samples : int, default=1
Number of samples drawn from the Gaussian process per query point.
random_state : int, RandomState instance or None, default=0
Determines random number generation to randomly draw samples.
Pass an int for reproducible results across multiple function
calls.
See :term:`Glossary <random_state>`.
Returns
-------
y_samples : ndarray of shape (n_samples_X, n_samples), or \
(n_samples_X, n_targets, n_samples)
Values of n_samples samples drawn from Gaussian process and
evaluated at query points.
"""
rng = check_random_state(random_state)
y_mean, y_cov = self.predict(X, return_cov=True)
if y_mean.ndim == 1:
y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T
else:
y_samples = [
rng.multivariate_normal(y_mean[:, i], y_cov, n_samples).T[:, np.newaxis]
for i in range(y_mean.shape[1])
]
y_samples = np.hstack(y_samples)
return y_samples
def log_marginal_likelihood(
self, theta=None, eval_gradient=False, clone_kernel=True
):
"""Return log-marginal likelihood of theta for training data.
Parameters
----------
theta : array-like of shape (n_kernel_params,), default=None
Kernel hyperparameters for which the log-marginal likelihood is
evaluated. If None, the precomputed log_marginal_likelihood
of ``self.kernel_.theta`` is returned.
eval_gradient : bool, default=False
If True, the gradient of the log-marginal likelihood with respect
to the kernel hyperparameters at position theta is returned
additionally. If True, theta must not be None.
clone_kernel : bool, default=True
If True, the kernel attribute is copied. If False, the kernel
attribute is modified in place, which may result in a performance improvement.
Returns
-------
log_likelihood : float
Log-marginal likelihood of theta for training data.
log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional
Gradient of the log-marginal likelihood with respect to the kernel
hyperparameters at position theta.
Only returned when eval_gradient is True.
"""
if theta is None:
if eval_gradient:
raise ValueError("Gradient can only be evaluated for theta!=None")
return self.log_marginal_likelihood_value_
if clone_kernel:
kernel = self.kernel_.clone_with_theta(theta)
else:
kernel = self.kernel_
kernel.theta = theta
if eval_gradient:
K, K_gradient = kernel(self.X_train_, eval_gradient=True)
else:
K = kernel(self.X_train_)
# Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I)
K[np.diag_indices_from(K)] += self.alpha
try:
L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False)
except np.linalg.LinAlgError:
return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf
# Support multi-dimensional output of self.y_train_
y_train = self.y_train_
if y_train.ndim == 1:
y_train = y_train[:, np.newaxis]
# Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y)
alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False)
# Alg 2.1, page 19, line 7
# -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi)
# y is originally thought to be a (1, n_samples) row vector. However,
# in multioutput regression, y is of shape (n_samples, n_outputs) and we need to compute
# y^T . alpha for each output, independently using einsum. Thus, it
# is equivalent to:
# for output_idx in range(n_outputs):
# log_likelihood_dims[output_idx] = (
# y_train[:, [output_idx]] @ alpha[:, [output_idx]]
# )
log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha)
log_likelihood_dims -= np.log(np.diag(L)).sum()
log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi)
# the log likelihood is summed up across the outputs
log_likelihood = log_likelihood_dims.sum(axis=-1)
if eval_gradient:
# Eq. 5.9, p. 114, and footnote 5 in p. 114
# 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient)
# alpha is supposed to be a vector of (n_samples,) elements. With
# multioutputs, alpha is a matrix of size (n_samples, n_outputs).
# Therefore, we want to construct a matrix of
# (n_samples, n_samples, n_outputs) equivalent to
# for output_idx in range(n_outputs):
# output_alpha = alpha[:, [output_idx]]
# inner_term[..., output_idx] = output_alpha @ output_alpha.T
inner_term = np.einsum("ik,jk->ijk", alpha, alpha)
# compute K^-1 of shape (n_samples, n_samples)
K_inv = cho_solve(
(L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False
)
# create a new axis to use broadcasting between inner_term and
# K_inv
inner_term -= K_inv[..., np.newaxis]
# Since we are interested about the trace of
# inner_term @ K_gradient, we don't explicitly compute the
# matrix-by-matrix operation and instead use an einsum. Therefore
# it is equivalent to:
# for param_idx in range(n_kernel_params):
# for output_idx in range(n_output):
# log_likelihood_gradient_dims[param_idx, output_idx] = (
# inner_term[..., output_idx] @
# K_gradient[..., param_idx]
# )
log_likelihood_gradient_dims = 0.5 * np.einsum(
"ijl,jik->kl", inner_term, K_gradient
)
# the log-likelihood gradient is summed up across the outputs
log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1)
if eval_gradient:
return log_likelihood, log_likelihood_gradient
else:
return log_likelihood
def _constrained_optimization(self, obj_func, initial_theta, bounds):
if self.optimizer == "fmin_l_bfgs_b":
opt_res = scipy.optimize.minimize(
obj_func,
initial_theta,
method="L-BFGS-B",
jac=True,
bounds=bounds,
)
_check_optimize_result("lbfgs", opt_res)
theta_opt, func_min = opt_res.x, opt_res.fun
elif callable(self.optimizer):
theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds)
else:
raise ValueError(f"Unknown optimizer {self.optimizer}.")
return theta_opt, func_min
def _more_tags(self):
return {"requires_fit": False}
|
|
from __future__ import unicode_literals
import json
import six
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from .models import dynamodb_backend, dynamo_json_dump
GET_SESSION_TOKEN_RESULT = """
<GetSessionTokenResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<GetSessionTokenResult>
<Credentials>
<SessionToken>
AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/L
To6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3z
rkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtp
Z3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE
</SessionToken>
<SecretAccessKey>
wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY
</SecretAccessKey>
<Expiration>2011-07-11T19:55:29.611Z</Expiration>
<AccessKeyId>AKIAIOSFODNN7EXAMPLE</AccessKeyId>
</Credentials>
</GetSessionTokenResult>
<ResponseMetadata>
<RequestId>58c5dbae-abef-11e0-8cfe-09039844ac7d</RequestId>
</ResponseMetadata>
</GetSessionTokenResponse>"""
def sts_handler():
return GET_SESSION_TOKEN_RESULT
class DynamoHandler(BaseResponse):
def get_endpoint_name(self, headers):
"""Parses request headers and extracts part od the X-Amz-Target
that corresponds to a method of DynamoHandler
ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
"""
# Headers are case-insensitive. Probably a better way to do this.
match = headers.get('x-amz-target') or headers.get('X-Amz-Target')
if match:
return match.split(".")[1]
def error(self, type_, status=400):
return status, self.response_headers, dynamo_json_dump({'__type': type_})
def call_action(self):
body = self.body.decode('utf-8')
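# STS GetSessionToken requests also arrive at this endpoint; short-circuit
# them with the canned XML response defined above.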
if 'GetSessionToken' in body:
return 200, self.response_headers, sts_handler()
self.body = json.loads(body or '{}')
endpoint = self.get_endpoint_name(self.headers)
if endpoint:
endpoint = camelcase_to_underscores(endpoint)
response = getattr(self, endpoint)()
if isinstance(response, six.string_types):
return 200, self.response_headers, response
else:
status_code, new_headers, response_content = response
self.response_headers.update(new_headers)
return status_code, self.response_headers, response_content
else:
return 404, self.response_headers, ""
def list_tables(self):
body = self.body
limit = body.get('Limit')
if body.get("ExclusiveStartTableName"):
last = body.get("ExclusiveStartTableName")
start = list(dynamodb_backend.tables.keys()).index(last) + 1
else:
start = 0
all_tables = list(dynamodb_backend.tables.keys())
if limit:
tables = all_tables[start:start + limit]
else:
tables = all_tables[start:]
response = {"TableNames": tables}
if limit and len(all_tables) > start + limit:
response["LastEvaluatedTableName"] = tables[-1]
return dynamo_json_dump(response)
def create_table(self):
body = self.body
name = body['TableName']
key_schema = body['KeySchema']
hash_key_element = key_schema['HashKeyElement']
hash_key_attr = hash_key_element['AttributeName']
hash_key_type = hash_key_element['AttributeType']
range_key_element = key_schema.get('RangeKeyElement', {})
range_key_attr = range_key_element.get('AttributeName')
range_key_type = range_key_element.get('AttributeType')
throughput = body["ProvisionedThroughput"]
read_units = throughput["ReadCapacityUnits"]
write_units = throughput["WriteCapacityUnits"]
table = dynamodb_backend.create_table(
name,
hash_key_attr=hash_key_attr,
hash_key_type=hash_key_type,
range_key_attr=range_key_attr,
range_key_type=range_key_type,
read_capacity=int(read_units),
write_capacity=int(write_units),
)
return dynamo_json_dump(table.describe)
def delete_table(self):
name = self.body['TableName']
table = dynamodb_backend.delete_table(name)
if table:
return dynamo_json_dump(table.describe)
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
def update_table(self):
name = self.body['TableName']
throughput = self.body["ProvisionedThroughput"]
new_read_units = throughput["ReadCapacityUnits"]
new_write_units = throughput["WriteCapacityUnits"]
table = dynamodb_backend.update_table_throughput(name, new_read_units, new_write_units)
return dynamo_json_dump(table.describe)
def describe_table(self):
name = self.body['TableName']
try:
table = dynamodb_backend.tables[name]
except KeyError:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
return dynamo_json_dump(table.describe)
def put_item(self):
name = self.body['TableName']
item = self.body['Item']
result = dynamodb_backend.put_item(name, item)
if result:
item_dict = result.to_json()
item_dict['ConsumedCapacityUnits'] = 1
return dynamo_json_dump(item_dict)
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
def batch_write_item(self):
table_batches = self.body['RequestItems']
for table_name, table_requests in table_batches.items():
for table_request in table_requests:
request_type = list(table_request)[0]
request = list(table_request.values())[0]
if request_type == 'PutRequest':
item = request['Item']
dynamodb_backend.put_item(table_name, item)
elif request_type == 'DeleteRequest':
key = request['Key']
hash_key = key['HashKeyElement']
range_key = key.get('RangeKeyElement')
item = dynamodb_backend.delete_item(table_name, hash_key, range_key)
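# NOTE: the response below is hard-coded to the "Thread"/"Reply" tables from
# the AWS documentation example instead of reflecting the tables actually
# written to.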
response = {
"Responses": {
"Thread": {
"ConsumedCapacityUnits": 1.0
},
"Reply": {
"ConsumedCapacityUnits": 1.0
}
},
"UnprocessedItems": {}
}
return dynamo_json_dump(response)
def get_item(self):
name = self.body['TableName']
key = self.body['Key']
hash_key = key['HashKeyElement']
range_key = key.get('RangeKeyElement')
attrs_to_get = self.body.get('AttributesToGet')
try:
item = dynamodb_backend.get_item(name, hash_key, range_key)
except ValueError:
er = 'com.amazon.coral.validate#ValidationException'
return self.error(er, status=400)
if item:
item_dict = item.describe_attrs(attrs_to_get)
item_dict['ConsumedCapacityUnits'] = 0.5
return dynamo_json_dump(item_dict)
else:
# Item not found
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er, status=404)
def batch_get_item(self):
table_batches = self.body['RequestItems']
results = {
"Responses": {
"UnprocessedKeys": {}
}
}
for table_name, table_request in table_batches.items():
items = []
keys = table_request['Keys']
attributes_to_get = table_request.get('AttributesToGet')
for key in keys:
hash_key = key["HashKeyElement"]
range_key = key.get("RangeKeyElement")
item = dynamodb_backend.get_item(table_name, hash_key, range_key)
if item:
item_describe = item.describe_attrs(attributes_to_get)
items.append(item_describe)
results["Responses"][table_name] = {"Items": items, "ConsumedCapacityUnits": 1}
return dynamo_json_dump(results)
def query(self):
name = self.body['TableName']
hash_key = self.body['HashKeyValue']
range_condition = self.body.get('RangeKeyCondition')
if range_condition:
range_comparison = range_condition['ComparisonOperator']
range_values = range_condition['AttributeValueList']
else:
range_comparison = None
range_values = []
items, last_page = dynamodb_backend.query(name, hash_key, range_comparison, range_values)
if items is None:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
result = {
"Count": len(items),
"Items": [item.attrs for item in items],
"ConsumedCapacityUnits": 1,
}
# Implement this when we do pagination
# if not last_page:
# result["LastEvaluatedKey"] = {
# "HashKeyElement": items[-1].hash_key,
# "RangeKeyElement": items[-1].range_key,
# }
return dynamo_json_dump(result)
def scan(self):
name = self.body['TableName']
filters = {}
scan_filters = self.body.get('ScanFilter', {})
for attribute_name, scan_filter in scan_filters.items():
# Keys are attribute names. Values are tuples of (comparison, comparison_value)
comparison_operator = scan_filter["ComparisonOperator"]
comparison_values = scan_filter.get("AttributeValueList", [])
filters[attribute_name] = (comparison_operator, comparison_values)
items, scanned_count, last_page = dynamodb_backend.scan(name, filters)
if items is None:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
result = {
"Count": len(items),
"Items": [item.attrs for item in items if item],
"ConsumedCapacityUnits": 1,
"ScannedCount": scanned_count
}
# Implement this when we do pagination
# if not last_page:
# result["LastEvaluatedKey"] = {
# "HashKeyElement": items[-1].hash_key,
# "RangeKeyElement": items[-1].range_key,
# }
return dynamo_json_dump(result)
def delete_item(self):
name = self.body['TableName']
key = self.body['Key']
hash_key = key['HashKeyElement']
range_key = key.get('RangeKeyElement')
return_values = self.body.get('ReturnValues', '')
item = dynamodb_backend.delete_item(name, hash_key, range_key)
if item:
if return_values == 'ALL_OLD':
item_dict = item.to_json()
else:
item_dict = {'Attributes': []}
item_dict['ConsumedCapacityUnits'] = 0.5
return dynamo_json_dump(item_dict)
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
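# Illustrative sketch (not part of moto): how an X-Amz-Target header value is
# reduced to the name of a DynamoHandler method. The header value below is a
# hypothetical example; camelcase_to_underscores is the helper imported above.
if __name__ == "__main__":
    target = "DynamoDB_20111205.ListTables"
    action = target.split(".")[1]                # "ListTables"
    print(camelcase_to_underscores(action))      # -> "list_tables"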
|
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import re
import sys
import platform
from setuptools import setup
from setuptools.command.test import test as TestCommand
# remember if we already had six _before_ installation
try:
import six # noqa
_HAD_SIX = True
except ImportError:
_HAD_SIX = False
CPY = platform.python_implementation() == 'CPython'
PY3 = sys.version_info >= (3,)
PY33 = (3, 3) <= sys.version_info < (3, 4)
LONGSDESC = open('README.rst').read()
# get version string from "autobahn/__init__.py"
# See: http://stackoverflow.com/a/7071358/884770
#
VERSIONFILE = "autobahn/__init__.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
# Autobahn core packages
#
packages = [
'autobahn',
'autobahn.wamp',
'autobahn.wamp.test',
'autobahn.websocket',
'autobahn.websocket.test',
'autobahn.asyncio',
'autobahn.twisted',
'twisted.plugins'
]
# Twisted dependencies
#
extras_require_twisted = ["zope.interface>=3.6", "Twisted>=12.1"]
# asyncio dependencies
#
if PY3:
if PY33:
# "Tulip"
extras_require_asyncio = ["asyncio>=0.2.1"]
else:
# Python 3.4+ has asyncio builtin
extras_require_asyncio = []
else:
# backport of asyncio
extras_require_asyncio = ["trollius>=1.0.4", "futures>=3.0.3"]
# C-based WebSocket acceleration
#
extras_require_accelerate = ["wsaccel>=0.6.2", "ujson>=1.33"] if CPY else []
# non-standard WebSocket compression support
#
extras_require_compress = ["python-snappy>=0.5", "lz4>=0.2.1"]
# non-JSON WAMP serialization support (namely MsgPack)
#
extras_require_serialization = ["msgpack-python>=0.4.0"]
# everything
#
extras_require_all = extras_require_twisted + extras_require_asyncio + \
extras_require_accelerate + extras_require_compress + extras_require_serialization
# development dependencies
#
extras_require_dev = ["pep8", "flake8", "mock>=1.0.1", "pytest>=2.6.4", "unittest2>=1.1.0"]
# for testing by users with "python setup.py test" (not Tox, which we use)
#
test_requirements = ["pytest", "mock>=1.0.1"]
# pytest integration for setuptools. see:
# http://pytest.org/latest/goodpractises.html#integration-with-setuptools-test-commands
# https://github.com/pyca/cryptography/pull/678/files
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# Import here because in module scope the eggs are not loaded.
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
# Now install Autobahn ..
#
setup(
name='autobahn',
version=verstr,
description='WebSocket client & server library, WAMP real-time framework',
long_description=LONGSDESC,
license='MIT License',
author='Tavendo GmbH',
author_email='[email protected]',
url='http://autobahn.ws/python',
platforms='Any',
install_requires=[
'six>=1.6.1',
'txaio>=1.0.3'
],
extras_require={
'all': extras_require_all,
'asyncio': extras_require_asyncio,
'twisted': extras_require_twisted,
'accelerate': extras_require_accelerate,
'compress': extras_require_compress,
'serialization': extras_require_serialization,
'dev': extras_require_dev,
},
tests_require=test_requirements,
cmdclass={'test': PyTest},
packages=packages,
zip_safe=False,
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
#
classifiers=["License :: OSI Approved :: MIT License",
"Development Status :: 5 - Production/Stable",
"Environment :: No Input/Output (Daemon)",
"Framework :: Twisted",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python :: Implementation :: Jython",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Communications",
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Object Brokering"],
keywords='autobahn autobahn.ws websocket realtime rfc6455 wamp rpc pubsub twisted asyncio'
)
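# Usage note (illustration only): each key of the extras_require dict above can
# be selected at install time with pip's extras syntax, e.g.
#
#   pip install autobahn[twisted]
#   pip install autobahn[asyncio,serialization]
#   pip install autobahn[all]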
try:
from twisted.internet import reactor
print("Twisted found (default reactor is {0})".format(reactor.__class__))
except ImportError:
# the user doesn't have Twisted, so skip
pass
else:
# Make Twisted regenerate the dropin.cache, if possible. This is necessary
# because in a site-wide install, dropin.cache cannot be rewritten by
# normal users.
if _HAD_SIX:
# only proceed if six was already installed _before_ installing AutobahnPython,
# since the plugin scan produces errors/warnings otherwise
try:
from twisted.plugin import IPlugin, getPlugins
list(getPlugins(IPlugin))
except Exception as e:
print("Failed to update Twisted plugin cache: {0}".format(e))
else:
print("Twisted dropin.cache regenerated.")
else:
print("Warning: regenerate of Twisted dropin.cache skipped (can't run when six wasn't there before)")
|
|
"""Read the balance of your bank accounts via FinTS."""
from __future__ import annotations
from collections import namedtuple
from datetime import timedelta
import logging
from typing import Any
from fints.client import FinTS3PinTanClient
from fints.dialog import FinTSDialogError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_NAME, CONF_PIN, CONF_URL, CONF_USERNAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(hours=4)
ICON = "mdi:currency-eur"
BankCredentials = namedtuple("BankCredentials", "blz login pin url")
CONF_BIN = "bank_identification_number"
CONF_ACCOUNTS = "accounts"
CONF_HOLDINGS = "holdings"
CONF_ACCOUNT = "account"
ATTR_ACCOUNT = CONF_ACCOUNT
ATTR_BANK = "bank"
ATTR_ACCOUNT_TYPE = "account_type"
SCHEMA_ACCOUNTS = vol.Schema(
{
vol.Required(CONF_ACCOUNT): cv.string,
vol.Optional(CONF_NAME, default=None): vol.Any(None, cv.string),
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_BIN): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PIN): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ACCOUNTS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
vol.Optional(CONF_HOLDINGS, default=[]): cv.ensure_list(SCHEMA_ACCOUNTS),
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the sensors.
Log in to the bank and get a list of existing accounts. Create a
sensor for each account.
"""
credentials = BankCredentials(
config[CONF_BIN], config[CONF_USERNAME], config[CONF_PIN], config[CONF_URL]
)
fints_name = config.get(CONF_NAME, config[CONF_BIN])
account_config = {
acc[CONF_ACCOUNT]: acc[CONF_NAME] for acc in config[CONF_ACCOUNTS]
}
holdings_config = {
acc[CONF_ACCOUNT]: acc[CONF_NAME] for acc in config[CONF_HOLDINGS]
}
client = FinTsClient(credentials, fints_name)
balance_accounts, holdings_accounts = client.detect_accounts()
accounts: list[SensorEntity] = []
for account in balance_accounts:
if config[CONF_ACCOUNTS] and account.iban not in account_config:
_LOGGER.info("Skipping account %s for bank %s", account.iban, fints_name)
continue
if not (account_name := account_config.get(account.iban)):
account_name = f"{fints_name} - {account.iban}"
accounts.append(FinTsAccount(client, account, account_name))
_LOGGER.debug("Creating account %s for bank %s", account.iban, fints_name)
for account in holdings_accounts:
if config[CONF_HOLDINGS] and account.accountnumber not in holdings_config:
_LOGGER.info(
"Skipping holdings %s for bank %s", account.accountnumber, fints_name
)
continue
account_name = holdings_config.get(account.accountnumber)
if not account_name:
account_name = f"{fints_name} - {account.accountnumber}"
accounts.append(FinTsHoldingsAccount(client, account, account_name))
_LOGGER.debug(
"Creating holdings %s for bank %s", account.accountnumber, fints_name
)
add_entities(accounts, True)
class FinTsClient:
"""Wrapper around the FinTS3PinTanClient.
Use the client property to get a new FinTS3PinTanClient object for each request.
"""
def __init__(self, credentials: BankCredentials, name: str) -> None:
"""Initialize a FinTsClient."""
self._credentials = credentials
self.name = name
@property
def client(self):
"""Get the client object.
As the fints library is stateless, there is no benefit in caching
the client objects. If that ever changes, consider caching the client
object and also think about potential concurrency problems.
"""
return FinTS3PinTanClient(
self._credentials.blz,
self._credentials.login,
self._credentials.pin,
self._credentials.url,
)
def detect_accounts(self):
"""Identify the accounts of the bank."""
balance_accounts = []
holdings_accounts = []
for account in self.client.get_sepa_accounts():
try:
self.client.get_balance(account)
balance_accounts.append(account)
except (IndexError, FinTSDialogError):
# account is not a balance account.
pass
try:
self.client.get_holdings(account)
holdings_accounts.append(account)
except FinTSDialogError:
# account is not a holdings account.
pass
return balance_accounts, holdings_accounts
class FinTsAccount(SensorEntity):
"""Sensor for a FinTS balance account.
A balance account contains an amount of money (=balance). The amount may
also be negative.
"""
def __init__(self, client: FinTsClient, account, name: str) -> None:
"""Initialize a FinTs balance account."""
self._client = client
self._account = account
self._attr_name = name
self._attr_icon = ICON
self._attr_extra_state_attributes = {
ATTR_ACCOUNT: self._account.iban,
ATTR_ACCOUNT_TYPE: "balance",
}
if self._client.name:
self._attr_extra_state_attributes[ATTR_BANK] = self._client.name
def update(self) -> None:
"""Get the current balance and currency for the account."""
bank = self._client.client
balance = bank.get_balance(self._account)
self._attr_native_value = balance.amount.amount
self._attr_native_unit_of_measurement = balance.amount.currency
_LOGGER.debug("updated balance of account %s", self.name)
class FinTsHoldingsAccount(SensorEntity):
"""Sensor for a FinTS holdings account.
A holdings account does not contain money but rather some financial
instruments, e.g. stocks.
"""
def __init__(self, client: FinTsClient, account, name: str) -> None:
"""Initialize a FinTs holdings account."""
self._client = client
self._attr_name = name
self._account = account
self._holdings: list[Any] = []
self._attr_icon = ICON
self._attr_native_unit_of_measurement = "EUR"
def update(self) -> None:
"""Get the current holdings for the account."""
bank = self._client.client
self._holdings = bank.get_holdings(self._account)
self._attr_native_value = sum(h.total_value for h in self._holdings)
@property
def extra_state_attributes(self) -> dict:
"""Additional attributes of the sensor.
Lists each holding of the account with the current value.
"""
attributes = {
ATTR_ACCOUNT: self._account.accountnumber,
ATTR_ACCOUNT_TYPE: "holdings",
}
if self._client.name:
attributes[ATTR_BANK] = self._client.name
for holding in self._holdings:
total_name = f"{holding.name} total"
attributes[total_name] = holding.total_value
pieces_name = f"{holding.name} pieces"
attributes[pieces_name] = holding.pieces
price_name = f"{holding.name} price"
attributes[price_name] = holding.market_value
return attributes
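# Illustrative sketch (not part of Home Assistant): exercising the FinTsClient
# wrapper outside the platform setup. All credentials are placeholders and the
# URL is hypothetical; a reachable FinTS endpoint is required for real data.
if __name__ == "__main__":
    credentials = BankCredentials(
        blz="12345678",
        login="demo-user",
        pin="demo-pin",
        url="https://fints.example-bank.de/fints",
    )
    client = FinTsClient(credentials, "example bank")
    balance_accounts, holdings_accounts = client.detect_accounts()
    print(
        "%d balance accounts, %d holdings accounts"
        % (len(balance_accounts), len(holdings_accounts))
    )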
|
|
#!/usr/bin/python2
#
# Copyright 2018 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Functions for data source to DSPL model conversion."""
from __future__ import print_function
__author__ = 'Benjamin Yolken <[email protected]>'
import itertools
import data_source
from dspllib.model import dspl_model
def _CalculateSlices(column_bundle):
"""Calculate all the possible slices to be produced from a column bundle.
Args:
column_bundle: A DataSourceColumnBundle object produced by a data source
Returns:
A sequence of DataSourceColumn sequences; each sequence contains the columns
for one slice.
"""
all_slices = []
binary_elements = []
non_binary_elements = []
child_parent_dict = {}
for column in column_bundle.GetColumnIterator():
if column.rollup:
binary_elements.append(column)
else:
non_binary_elements.append(column)
if column.parent_ref:
child_parent_dict[column] = column_bundle.GetColumnByID(column.parent_ref)
# Expand out slices using powerset operator
for selection in _Powerset(binary_elements):
transformed_slice = non_binary_elements + list(selection)
all_slices.append([s for s in transformed_slice])
# Prune slices that contain both a concept and an ancestor
slices_to_evaluate = []
for data_slice in all_slices:
keep_slice = True
for key in child_parent_dict.keys():
if key in data_slice:
curr_val = child_parent_dict[key]
# Loop through concept ancestors
while True:
if curr_val in data_slice:
keep_slice = False
break
elif curr_val in child_parent_dict:
curr_val = child_parent_dict[curr_val]
else:
break
if not keep_slice:
break
if keep_slice:
slices_to_evaluate.append(data_slice)
return slices_to_evaluate
def _Powerset(input_list):
"""Create powerset iterator from the elements in a list.
Note: Based on example in official Python itertools documentation.
Example:
[p for p in _Powerset(['a', 'b'])] == [(), ('a',), ('b',), ('a', 'b')]
Args:
input_list: A sequence of Python objects
Returns:
An iterator which loops through all (tuple) elements in the powerset of
the input_list.
"""
return (
itertools.chain.from_iterable(
itertools.combinations(input_list, r) for r
in range(len(input_list) + 1)))
def _CreateConceptTable(
column, instance_data, parent_column=None, verbose=True):
"""Create a DSPL table object that enumerates the instances of a concept.
If the concept extends 'entity:entity' or 'geo:location', extra columns
are added to the resulting table for the required inherited properties.
Otherwise, the table has just a single column.
By convention, the table id is given as [column_id]_table and the
corresponding CSV is named [column_id]_table.csv.
Args:
column: A DataSourceColumn object corresponding to a dataset dimension
instance_data: A TableRows object containing a list of concept instances
parent_column: A DataSourceColumn object corresponding to the parent
of this concept
verbose: Print out status messages to stdout
Returns:
A DSPL Table object
"""
dspl_columns = [dspl_model.TableColumn(column_id=column.column_id,
data_type=column.data_type)]
if parent_column:
dspl_columns += [dspl_model.TableColumn(column_id=parent_column.column_id,
data_type=parent_column.data_type)]
if column.concept_extension == 'entity:entity':
# Add a 'name' column and populate it with the instance IDs
dspl_columns += [
dspl_model.TableColumn(column_id='name', data_type='string')]
dspl_table_data = instance_data.MergeValues(instance_data).rows
elif column.concept_extension == 'geo:location':
# Add 'name', 'latitude', and 'longitude' columns; populate the first with
# the instance IDs, the others with blank values
dspl_columns += [
dspl_model.TableColumn(column_id='name', data_type='string'),
dspl_model.TableColumn(column_id='latitude', data_type='float'),
dspl_model.TableColumn(column_id='longitude', data_type='float')]
dspl_table_data = (instance_data.MergeValues(instance_data)
.MergeConstant('').MergeConstant('').rows)
else:
dspl_table_data = instance_data.rows
# Create table, including header row in table data
concept_table = dspl_model.Table(
table_id='%s_table' % (column.column_id),
columns=dspl_columns,
file_name='%s_table.csv' % (column.column_id),
table_data=[[c.column_id for c in dspl_columns]] + dspl_table_data,
verbose=verbose)
return concept_table
def _CreateSliceTable(slice_columns, table_id, file_name,
slice_data, verbose=True):
"""Create a DSPL Table object for a dataset slice.
Args:
slice_columns: Sequence of DataSourceColumn objects representing concepts
in this slice
table_id: ID for the table
file_name: Name of the CSV file containing the table data
slice_data: A TableRows object containing the data for the slice table
verbose: Print out status messages to stdout
Returns:
A DSPL Table object
"""
dspl_columns = [
dspl_model.TableColumn(c.column_id, c.data_type, c.data_format)
for c in slice_columns]
# Create table, including header row in table data
slice_table = dspl_model.Table(
table_id=table_id,
columns=dspl_columns,
file_name=file_name,
table_data=[[c.column_id for c in dspl_columns]] + slice_data.rows,
verbose=verbose)
return slice_table
def PopulateDataset(data_source_obj, verbose):
"""Create a DSPL dataset from a data source.
Loops through the set of possible slices (provided by the _CalculateSlices
function), creating the necessary DSPL concept, slice, and table objects as
needed.
The following naming convention is used:
DSPL concept ID := DataSource column ID
DSPL table ID for concept tables := DSPL concept ID + "_table"
DSPL slice ID := "slice_" + n, where n=0,1,2,...
DSPL table ID for slice tables := DSPL slice ID + "_table"
DSPL table file name := DSPL table ID + ".csv"
Args:
data_source_obj: An object that implements the DataSource interface
verbose: Print out status messages to stdout
Returns:
A DSPL DataSet object
"""
column_bundle = data_source_obj.GetColumnBundle()
dataset = dspl_model.DataSet(verbose=verbose)
# Add standard imports
dataset.AddImport(
dspl_model.Import(
namespace_id='entity',
namespace_url=(
'http://www.google.com/publicdata/dataset/google/entity')))
dataset.AddImport(
dspl_model.Import(
namespace_id='geo',
namespace_url=(
'http://www.google.com/publicdata/dataset/google/geo')))
dataset.AddImport(
dspl_model.Import(
namespace_id='geo_us',
namespace_url=(
'http://www.google.com/publicdata/dataset/google/geo/us')))
dataset.AddImport(
dspl_model.Import(
namespace_id='quantity',
namespace_url=(
'http://www.google.com/publicdata/dataset/google/quantity')))
dataset.AddImport(
dspl_model.Import(
namespace_id='time',
namespace_url=(
'http://www.google.com/publicdata/dataset/google/time')))
dataset.AddImport(
dspl_model.Import(
namespace_id='unit',
namespace_url=(
'http://www.google.com/publicdata/dataset/google/unit')))
# Store concept ID to column ID mappings for imported dimension concepts
dimension_map = {}
# Generate concept metadata
for column in column_bundle.GetColumnIterator():
if column.slice_role == 'metric':
metric_concept = dspl_model.Concept(
concept_id=column.column_id,
concept_extension_reference=column.concept_extension,
data_type=column.data_type)
dataset.AddConcept(metric_concept)
else:
# Column corresponds to a dimension concept
if column.concept_ref:
# Dimension concept is imported; no need to enumerate instances
dimension_concept = dspl_model.Concept(
concept_id=column.concept_ref,
concept_reference=column.concept_ref,
data_type=column.data_type)
dimension_map[column.concept_ref] = column.column_id
else:
# Dimension defined inside the dataset; need to enumerate instances
if verbose:
print('Enumerating instances of \'%s\' concept' %
(column.column_id))
if column.parent_ref:
parent_column = column_bundle.GetColumnByID(column.parent_ref)
query_column_ids = [column.column_id, column.parent_ref]
else:
parent_column = None
query_column_ids = [column.column_id]
concept_table_rows = data_source_obj.GetTableData(
data_source.QueryParameters(
query_type=data_source.QueryParameters.CONCEPT_QUERY,
column_ids=query_column_ids))
dataset.AddTable(
_CreateConceptTable(
column, concept_table_rows, parent_column, verbose))
dimension_concept = dspl_model.Concept(
concept_id=column.column_id,
concept_extension_reference=column.concept_extension,
data_type=column.data_type,
table_ref='%s_table' % (column.column_id))
if column.parent_ref:
# Add in parent reference property
dimension_concept.properties.append(
dspl_model.Property(column.parent_ref, True))
dataset.AddConcept(dimension_concept)
# Generate slice metadata
for i, slice_column_set in enumerate(_CalculateSlices(column_bundle)):
if verbose:
print('Evaluating slice: %s' % ([c.column_id for c in slice_column_set]))
dimension_ids = []
metric_ids = []
for column in slice_column_set:
if column.slice_role == 'dimension':
if column.concept_ref:
dimension_ids.append(column.concept_ref)
else:
dimension_ids.append(column.column_id)
else:
if column.concept_ref:
metric_ids.append(column.concept_ref)
else:
metric_ids.append(column.column_id)
# Execute slice query
if verbose:
print('Getting slice values')
slice_table_rows = data_source_obj.GetTableData(
data_source.QueryParameters(
query_type=data_source.QueryParameters.SLICE_QUERY,
column_ids=[c.column_id for c in slice_column_set]))
# Add slice and table metadata to dataset model
slice_table = _CreateSliceTable(
slice_column_set,
'slice_%d_table' % i,
'slice_%d_table.csv' % i,
slice_table_rows,
verbose)
dataset.AddTable(slice_table)
new_slice = dspl_model.Slice(
slice_id='slice_%d' % (i),
dimension_refs=dimension_ids,
metric_refs=metric_ids,
dimension_map=dimension_map,
table_ref='slice_%d_table' % i)
dataset.AddSlice(new_slice)
return dataset
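# Illustrative sketch (not part of the converter): _Powerset is what drives the
# slice expansion in _CalculateSlices by enumerating every subset of the
# rollup columns.
if __name__ == "__main__":
    print(list(_Powerset(['a', 'b'])))
    # -> [(), ('a',), ('b',), ('a', 'b')]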
|