"""Circuits using implementation of a Python module (plugin) manager for Naali.
here is a Manager that the PythonScriptModule instanciates,
and which then loads the python modules that are to be autoloaded.
This handles their event registrations, passing incoming events to them etc.
"""
try:
import rexviewer as r
except ImportError: #not running under rex
import mockviewer as r
from circuits import handler, Event, Component, Manager, Debugger
from core.logger import NaaliLogger
#is not identical to the c++ side, where x and y have abs and rel
#XXX consider making identical and possible wrapping of the c++ type
#from collections import namedtuple
#MouseInfo = namedtuple('MouseInfo', 'x y rel_x rel_y')
from core.mouseinfo import MouseInfo
#XXX a temporary fix 'cause circuits socket internals depend on a Started event now
from circuits.core.events import Started
class Key(Event): pass
class Update(Event): pass
class Chat(Event): pass
class Input(Event): pass
class MouseMove(Event): pass
class MouseClick(Event): pass
class SceneAdded(Event): pass
class EntityUpdate(Event): pass
class Exit(Event): pass
class LoginInfo(Event): pass
class InboundNetwork(Event): pass
class GenericMessage(Event): pass
class Logout(Event): pass
class WorldStreamReady(Event): pass
class ComponentRunner:
instance = None
def __init__(self):
# instantiated from the c++ side, as modulemanager there
#assert self.instance is None
ComponentRunner.instance = self #is used as a singleton now. is this needed anywhere actually? XXX
self.mouseinfo = MouseInfo(0,0,0,0)
#m.start() #instead we tick() & flush() in update
self.firstrun = True
r.restart = False
self.start()
r.manager = self
def start(self):
# Create a new circuits Manager
#ignevents = [Update, MouseMove]
ignchannames = ['update', 'started', 'on_mousemove', 'on_mousedrag', 'on_keydown', 'on_input', 'on_mouseclick', 'on_entityupdated', 'on_exit', 'on_keyup', 'on_login', 'on_inboundnetwork', 'on_genericmessage', 'on_scene', 'on_entity_visuals_modified', 'on_logout', 'on_worldstreamready', 'on_sceneadded']
ignchannels = [('*', n) for n in ignchannames]
# Note: instantiating Manager with debugger causes severe lag when running as a true windowed app (no console), so instantiate without debugger
# Fix attempt: give the circuits Debugger a logger which uses Naali logging system, instead of the default which writes to sys.stderr
# Todo: if the default stdout is hidden, the py stdout should be changed
# to something that shows e.g. in console, so prints from scripts show
# (people commonly use those for debugging so they should show somewhere)
d = Debugger(IgnoreChannels = ignchannels, logger=NaaliLogger()) #IgnoreEvents = ignored)
self.m = Manager() + d
#self.m = Manager()
#or __all__ in pymodules __init__ ? (i.e. import pymodules would do that)
if self.firstrun:
import autoload
self.firstrun = False
else: #reload may occasionally cause strange behaviour, so better not to do it in production use; it is just a dev tool.
#print "reloading autoload"
import autoload
autoload = reload(autoload)
#print "Autoload module:", autoload
autoload.load(self.m)
self.m.push(Started(self.m, None)) #webserver requires this now, temporarily XXX
def run(self, deltatime=0.1):
#print "."
self.send_event(Update(deltatime), "update") #so that all components are updated immediately once for this frame
#XXX should this be using the __tick__ mechanism of circuits, and how?
m = self.m
m.tick()
def send_event(self, event, channel):
"""simulate sync sending of events using the async lib.
needed to be able to return the info of whether the event is to be
send to more handlers on the c++ side in the Naali event system"""
m = self.m
ret = m.push(event, channel)
while m: m.flush() #circuits components evaluate to false when they have no pending events left
if not ret.errors:
#print "EVENT:", event, ret.value
return True in ret #circuits return values implement __contains__ for this use case
else:
#did the debugger already show the traceback?
return False
def RexNetMsgChatFromSimulator(self, frm, message):
self.send_event(Chat(frm, message), "on_chat")
def INPUT_EVENT(self, evid):
"""Note: the PygameDriver for circuits has a different design:
there the event data is Key, and the channel either "keydown" or "keyup",
and mouse and clicks are different events and channels.
Here we have no way to differentiate presses/releases,
'cause the c++ internals don't do that apart from the constant name.
"""
#print "circuits_manager ComponentRunner got input event:", evid
return self.send_event(Input(evid), "on_input")
def KEY_INPUT_EVENT(self, evid, keycode, keymod):
"""Handles key inputs, creates a Circuits Key event with the data provided
WIP: since the on_keydown call doesn't work for now, we resorted to using Input(keycode)
instead; it works similarly but still not the way it should.
"""
#print "CircuitManager received KEY_INPUT (event:", evid, "key:", keycode, "mods:", keymod, ")"
#print r.KeyPressed, r.KeyReleased
if evid == 39: #r.KeyPressed:
return self.send_event(Key(keycode, keymod), "on_keydown")
elif evid == 40: #r.KeyReleased:
return self.send_event(Key(keycode, keymod), "on_keyup")
def MOUSE_DRAG_INPUT_EVENT(self, event, x_abs, y_abs, x_rel, y_rel):
self.mouseinfo.setInfo(x_abs, y_abs, x_rel, y_rel)
#print "CircuitsManager got mouse movement", self.mouseinfo, self.mouseinfo.x, self.mouseinfo.y
return self.send_event(MouseMove(event, self.mouseinfo), "on_mousedrag")
def MOUSE_INPUT_EVENT(self, event, x_abs, y_abs, x_rel, y_rel):
#print "CircuitsManager got a mouse click", mb_click, x_abs, y_abs, x_rel, y_rel
#print "CircuitsManager", event
self.mouseinfo.setInfo(x_abs, y_abs, x_rel, y_rel)
if event == 60: #r.MouseMove:
return self.send_event(MouseMove(event, self.mouseinfo), "on_mousemove")
#return self.send_event(Mouse(event, self XXX
else:
return self.send_event(MouseClick(event, self.mouseinfo), "on_mouseclick")
## def SCENE_EVENT(self, evid, entid):
## return self.send_event(EntityUpdate(evid, entid), "on_scene")
def SCENE_ADDED(self, name):
return self.send_event(SceneAdded(name), "on_sceneadded")
def ENTITY_UPDATED(self, entid):
#print "Entity updated!", entid
return self.send_event(EntityUpdate(entid), "on_entityupdated")
def ENTITY_VISUALS_MODIFIED(self, entid):
return self.send_event(EntityUpdate(entid), "on_entity_visuals_modified")
def LOGIN_INFO(self, id):
#print "Login Info", id
#return self.send_event(LoginInfo(id), "on_login") #XXX so wasn't needed or?
return False
def SERVER_DISCONNECTED(self, id):
#print "Circuits got the server disconnection event."
return self.send_event(Logout(id), "on_logout")
def INBOUND_NETWORK(self, id, name):
#print "Circuits got network_in event:", id, name
return self.send_event(InboundNetwork(id, name), "on_inboundnetwork")
##r.randomTest(id) #for testing whether the id received is the same after a round-trip through Python, note: worked
def GENERIC_MESSAGE(self, typename, data):
#print "Circuits got Generic Message event:", data
return self.send_event(GenericMessage(typename, data), "on_genericmessage")
def WORLD_STREAM_READY(self, event_id):
return self.send_event(WorldStreamReady(event_id), "on_worldstreamready")
def exit(self):
r.logInfo("Circuits manager stopping...")
self.send_event(Exit(), "on_exit") #the manager was originally not run properly, so the stop didn't propagate to components. fix when switching to the dev branch of circuits XXX
self.m.stop() #stop was not reaching components, so the exit event above was added as a quick fix
while self.m: self.m.flush()
def restart(self):
#r.restart = True
r.logInfo("Restarting python module manager, reloading plugin codes")
self.exit()
self.start()
r.logInfo("...done python restart.")
#r.restart = False
#TestModule moved to own file (err, module)
"""
Why not allow plain functions as event handlers too?
The Python module itself, i.e. the file, could be considered the module,
so no classes would be required.
circuits, however, expects you to make components.
"""
#~ @e.communication.PRESENCE_STATUS_UPDATE
#~ def sparkle_presence_updates():
#~ """a custom visual thing: some local sparkles upon IM presence updates.
#~ what data do we have in these events? the im participant id?
#~ is that associated to an avatar anywhere? so could e.g. get the location..
#~ """
#~ create_sparkle((0,2, 0,2), 0.5) #in upper right corner, for half a sec
if __name__ == '__main__':
runner = ComponentRunner()
import time
while True:
runner.run(0.1)
#runner.RexNetMsgChatFromSimulator("main..", "hello")
"""
rvalue = runner.KEY_INPUT_EVENT(3, 46, 0)
if rvalue:
print "returned true"
else:
print "returned false"
"""
runner.MOUSE_INPUT_EVENT(60, 5, 6, 7, 8) #60 is the MouseMove constant checked in MOUSE_INPUT_EVENT above
time.sleep(0.01)
import datetime
import sys
import unittest
from django.conf import settings
from django.contrib.admindocs import utils
from django.contrib.admindocs.views import get_return_data_type
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import captured_stderr
from django.urls import reverse
from .models import Company, Person
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# password = "secret"
User.objects.create(
pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
is_staff=True, last_login=datetime.datetime(2007, 5, 30, 13, 20, 10),
date_joined=datetime.datetime(2007, 5, 30, 13, 20, 10)
)
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_docs.urls')
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.admindocs'})
class AdminDocsTestCase(TestCase):
pass
class MiscTests(AdminDocsTestCase):
def setUp(self):
User.objects.create_superuser('super', None, 'secret')
self.client.login(username='super', password='secret')
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
@override_settings(SITE_ID=None) # will restore SITE_ID after the test
def test_no_sites_framework(self):
"""
Without the sites framework, should not access SITE_ID or Site
objects. Deleting settings is fine here as UserSettingsHolder is used.
"""
Site.objects.all().delete()
del settings.SITE_ID
self.client.get('/admindocs/views/') # should not raise
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class AdminDocViewTests(TestDataMixin, AdminDocsTestCase):
def setUp(self):
self.client.login(username='super', password='secret')
def test_index(self):
self.client.logout()
response = self.client.get(reverse('django-admindocs-docroot'), follow=True)
# Should display the login screen
self.assertContains(response,
'<input type="hidden" name="next" value="/admindocs/" />', html=True)
self.client.login(username='super', password='secret')
response = self.client.get(reverse('django-admindocs-docroot'))
self.assertContains(response, '<h1>Documentation</h1>', html=True)
self.assertContains(response,
'<h1 id="site-name"><a href="/admin/">Django '
'administration</a></h1>')
def test_bookmarklets(self):
response = self.client.get(reverse('django-admindocs-bookmarklets'))
self.assertContains(response, '/admindocs/views/')
def test_templatetag_index(self):
response = self.client.get(reverse('django-admindocs-tags'))
self.assertContains(response, '<h3 id="built_in-extends">extends</h3>', html=True)
def test_templatefilter_index(self):
response = self.client.get(reverse('django-admindocs-filters'))
self.assertContains(response, '<h3 id="built_in-first">first</h3>', html=True)
def test_view_index(self):
response = self.client.get(reverse('django-admindocs-views-index'))
self.assertContains(response,
'<h3><a href="/admindocs/views/django.contrib.admindocs.views.BaseAdminDocsView/">/admindocs/</a></h3>',
html=True)
self.assertContains(response, 'Views by namespace test')
self.assertContains(response, 'Name: <code>test:func</code>.')
def test_view_detail(self):
response = self.client.get(
reverse('django-admindocs-views-detail',
args=['django.contrib.admindocs.views.BaseAdminDocsView']))
# View docstring
self.assertContains(response, 'Base view for admindocs views.')
def test_view_detail_illegal_import(self):
"""
#23601 - Ensure the view exists in the URLconf.
"""
response = self.client.get(
reverse('django-admindocs-views-detail',
args=['urlpatterns_reverse.nonimported_module.view']))
self.assertEqual(response.status_code, 404)
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_model_index(self):
response = self.client.get(reverse('django-admindocs-models-index'))
self.assertContains(
response,
'<h2 id="app-auth">Authentication and Authorization (django.contrib.auth)</h2>',
html=True
)
def test_template_detail(self):
response = self.client.get(reverse('django-admindocs-templates',
args=['admin_doc/template_detail.html']))
self.assertContains(response,
'<h1>Template: "admin_doc/template_detail.html"</h1>', html=True)
def test_missing_docutils(self):
utils.docutils_is_available = False
try:
response = self.client.get(reverse('django-admindocs-docroot'))
self.assertContains(response,
'<h3>The admin documentation system requires Python\'s '
'<a href="http://docutils.sf.net/">docutils</a> library.</h3>',
html=True)
self.assertContains(response,
'<h1 id="site-name"><a href="/admin/">Django '
'administration</a></h1>')
finally:
utils.docutils_is_available = True
@override_settings(TEMPLATES=[{
'NAME': 'ONE',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'NAME': 'TWO',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}])
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class AdminDocViewWithMultipleEngines(AdminDocViewTests):
def test_templatefilter_index(self):
# Overridden because non-trivial TEMPLATES settings aren't supported
# but the page shouldn't crash (#24125).
response = self.client.get(reverse('django-admindocs-filters'))
self.assertContains(response, '<title>Template filters</title>', html=True)
def test_templatetag_index(self):
# Overridden because non-trivial TEMPLATES settings aren't supported
# but the page shouldn't crash (#24125).
response = self.client.get(reverse('django-admindocs-tags'))
self.assertContains(response, '<title>Template tags</title>', html=True)
class XViewMiddlewareTest(TestDataMixin, AdminDocsTestCase):
def test_xview_func(self):
user = User.objects.get(username='super')
response = self.client.head('/xview/func/')
self.assertNotIn('X-View', response)
self.client.login(username='super', password='secret')
response = self.client.head('/xview/func/')
self.assertIn('X-View', response)
self.assertEqual(response['X-View'], 'admin_docs.views.xview')
user.is_staff = False
user.save()
response = self.client.head('/xview/func/')
self.assertNotIn('X-View', response)
user.is_staff = True
user.is_active = False
user.save()
response = self.client.head('/xview/func/')
self.assertNotIn('X-View', response)
def test_xview_class(self):
user = User.objects.get(username='super')
response = self.client.head('/xview/class/')
self.assertNotIn('X-View', response)
self.client.login(username='super', password='secret')
response = self.client.head('/xview/class/')
self.assertIn('X-View', response)
self.assertEqual(response['X-View'], 'admin_docs.views.XViewClass')
user.is_staff = False
user.save()
response = self.client.head('/xview/class/')
self.assertNotIn('X-View', response)
user.is_staff = True
user.is_active = False
user.save()
response = self.client.head('/xview/class/')
self.assertNotIn('X-View', response)
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class DefaultRoleTest(AdminDocsTestCase):
def test_parse_rst(self):
"""
``django.contrib.admindocs.utils.parse_rst`` should use
``cmsreference`` as the default role.
"""
markup = ('<p><a class="reference external" href="/admindocs/%s">'
'title</a></p>\n')
self.assertEqual(utils.parse_rst('`title`', 'model'),
markup % 'models/title/')
self.assertEqual(utils.parse_rst('`title`', 'view'),
markup % 'views/title/')
self.assertEqual(utils.parse_rst('`title`', 'template'),
markup % 'templates/title/')
self.assertEqual(utils.parse_rst('`title`', 'filter'),
markup % 'filters/#title')
self.assertEqual(utils.parse_rst('`title`', 'tag'),
markup % 'tags/#title')
def test_publish_parts(self):
"""
Django shouldn't break the default role for interpreted text
when ``publish_parts`` is used directly, by setting it to
``cmsreference``. See #6681.
"""
import docutils
self.assertNotEqual(docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE,
'cmsreference')
source = 'reST, `interpreted text`, default role.'
markup = '<p>reST, <cite>interpreted text</cite>, default role.</p>\n'
parts = docutils.core.publish_parts(source=source, writer_name="html4css1")
self.assertEqual(parts['fragment'], markup)
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class TestModelDetailView(TestDataMixin, AdminDocsTestCase):
"""
Tests that various details render correctly
"""
def setUp(self):
self.client.login(username='super', password='secret')
with captured_stderr() as self.docutils_stderr:
self.response = self.client.get(reverse('django-admindocs-models-detail', args=['admin_docs', 'Person']))
def test_method_excludes(self):
"""
Methods that begin with strings defined in
``django.contrib.admindocs.views.MODEL_METHODS_EXCLUDE``
should not get displayed in the admin docs.
"""
self.assertContains(self.response, "<td>get_full_name</td>")
self.assertNotContains(self.response, "<td>_get_full_name</td>")
self.assertNotContains(self.response, "<td>add_image</td>")
self.assertNotContains(self.response, "<td>delete_image</td>")
self.assertNotContains(self.response, "<td>set_status</td>")
self.assertNotContains(self.response, "<td>save_changes</td>")
def test_methods_with_arguments(self):
"""
Methods that take arguments should also be displayed.
"""
self.assertContains(self.response, "<h3>Methods with arguments</h3>")
self.assertContains(self.response, "<td>rename_company</td>")
self.assertContains(self.response, "<td>dummy_function</td>")
self.assertContains(self.response, "<td>suffix_company_name</td>")
def test_methods_with_arguments_display_arguments(self):
"""
Methods with arguments should have their arguments displayed.
"""
self.assertContains(self.response, "<td>new_name</td>")
def test_methods_with_arguments_display_arguments_default_value(self):
"""
Methods with keyword arguments should have their arguments displayed.
"""
self.assertContains(self.response, "<td>suffix='ltd'</td>")
def test_methods_with_multiple_arguments_display_arguments(self):
"""
Methods with multiple arguments should have all their arguments
displayed, but omitting 'self'.
"""
self.assertContains(self.response, "<td>baz, rox, *some_args, **some_kwargs</td>")
def test_method_data_types(self):
"""
We should be able to get a basic idea of the type returned
by a method
"""
company = Company.objects.create(name="Django")
person = Person.objects.create(
first_name="Human",
last_name="User",
company=company
)
self.assertEqual(
get_return_data_type(person.get_status_count.__name__),
'Integer'
)
self.assertEqual(
get_return_data_type(person.get_groups_list.__name__),
'List'
)
def test_descriptions_render_correctly(self):
"""
The ``description`` field should render correctly for each type of field
"""
# help text in fields
self.assertContains(self.response, "<td>first name - The person's first name</td>")
self.assertContains(self.response, "<td>last name - The person's last name</td>")
# method docstrings
self.assertContains(self.response, "<p>Get the full name of the person</p>")
link = '<a class="reference external" href="/admindocs/models/%s/">%s</a>'
markup = '<p>the related %s object</p>'
company_markup = markup % (link % ("admin_docs.company", "admin_docs.Company"))
# foreign keys
self.assertContains(self.response, company_markup)
# foreign keys with help text
self.assertContains(self.response, "%s\n - place of work" % company_markup)
# many to many fields
self.assertContains(
self.response,
"number of related %s objects" % (link % ("admin_docs.group", "admin_docs.Group"))
)
self.assertContains(
self.response,
"all related %s objects" % (link % ("admin_docs.group", "admin_docs.Group"))
)
# "raw" and "include" directives are disabled
self.assertContains(self.response, '<p>"raw" directive disabled.</p>',)
self.assertContains(self.response, '.. raw:: html\n :file: admin_docs/evilfile.txt')
self.assertContains(self.response, '<p>"include" directive disabled.</p>',)
self.assertContains(self.response, '.. include:: admin_docs/evilfile.txt')
out = self.docutils_stderr.getvalue()
self.assertIn('"raw" directive disabled', out)
self.assertIn('"include" directive disabled', out)
def test_model_with_many_to_one(self):
link = '<a class="reference external" href="/admindocs/models/%s/">%s</a>'
response = self.client.get(
reverse('django-admindocs-models-detail', args=['admin_docs', 'company'])
)
self.assertContains(
response,
"number of related %s objects" % (link % ("admin_docs.person", "admin_docs.Person"))
)
self.assertContains(
response,
"all related %s objects" % (link % ("admin_docs.person", "admin_docs.Person"))
)
def test_model_with_no_backward_relations_render_only_relevant_fields(self):
"""
A model with ``related_name`` of `+` should not show backward relationship
links in admin docs
"""
response = self.client.get(
reverse('django-admindocs-models-detail',
args=['admin_docs', 'family']))
fields = response.context_data.get('fields')
self.assertEqual(len(fields), 2)
def test_model_docstring_renders_correctly(self):
summary = (
'<h2 class="subhead"><p>Stores information about a person, related to <a class="reference external" '
'href="/admindocs/models/myapp.company/">myapp.Company</a>.</p></h2>'
)
subheading = '<p><strong>Notes</strong></p>'
body = '<p>Use <tt class="docutils literal">save_changes()</tt> when saving this object.</p>'
model_body = (
'<dl class="docutils"><dt><tt class="'
'docutils literal">company</tt></dt><dd>Field storing <a class="'
'reference external" href="/admindocs/models/myapp.company/">'
'myapp.Company</a> where the person works.</dd></dl>'
)
self.assertContains(self.response, 'DESCRIPTION')
self.assertContains(self.response, summary, html=True)
self.assertContains(self.response, subheading, html=True)
self.assertContains(self.response, body, html=True)
self.assertContains(self.response, model_body, html=True)
def test_model_detail_title(self):
self.assertContains(self.response, '<h1>admin_docs.Person</h1>', html=True)
@unittest.skipUnless(utils.docutils_is_available, "no docutils installed.")
class TestUtils(AdminDocsTestCase):
"""
This __doc__ output is required for testing. I copied this example from
`admindocs` documentation. (TITLE)
Display an individual :model:`myapp.MyModel`.
**Context**
``RequestContext``
``mymodel``
An instance of :model:`myapp.MyModel`.
**Template:**
:template:`myapp/my_template.html` (DESCRIPTION)
some_metadata: some data
"""
def setUp(self):
self.docstring = self.__doc__
def test_trim_docstring(self):
trim_docstring_output = utils.trim_docstring(self.docstring)
trimmed_docstring = (
'This __doc__ output is required for testing. I copied this '
'example from\n`admindocs` documentation. (TITLE)\n\n'
'Display an individual :model:`myapp.MyModel`.\n\n'
'**Context**\n\n``RequestContext``\n\n``mymodel``\n'
' An instance of :model:`myapp.MyModel`.\n\n'
'**Template:**\n\n:template:`myapp/my_template.html` '
'(DESCRIPTION)\n\nsome_metadata: some data'
)
self.assertEqual(trim_docstring_output, trimmed_docstring)
def test_parse_docstring(self):
title, description, metadata = utils.parse_docstring(self.docstring)
docstring_title = (
'This __doc__ output is required for testing. I copied this example from\n'
'`admindocs` documentation. (TITLE)'
)
docstring_description = (
'Display an individual :model:`myapp.MyModel`.\n\n'
'**Context**\n\n``RequestContext``\n\n``mymodel``\n'
' An instance of :model:`myapp.MyModel`.\n\n'
'**Template:**\n\n:template:`myapp/my_template.html` '
'(DESCRIPTION)'
)
self.assertEqual(title, docstring_title)
self.assertEqual(description, docstring_description)
self.assertEqual(metadata, {'some_metadata': 'some data'})
def test_title_output(self):
title, description, metadata = utils.parse_docstring(self.docstring)
title_output = utils.parse_rst(title, 'model', 'model:admindocs')
self.assertIn('TITLE', title_output)
title_rendered = (
'<p>This __doc__ output is required for testing. I copied this '
'example from\n<a class="reference external" '
'href="/admindocs/models/admindocs/">admindocs</a> documentation. '
'(TITLE)</p>\n'
)
self.assertHTMLEqual(title_output, title_rendered)
def test_description_output(self):
title, description, metadata = utils.parse_docstring(self.docstring)
description_output = utils.parse_rst(description, 'model', 'model:admindocs')
description_rendered = (
'<p>Display an individual <a class="reference external" '
'href="/admindocs/models/myapp.mymodel/">myapp.MyModel</a>.</p>\n'
'<p><strong>Context</strong></p>\n<p><tt class="docutils literal">'
'RequestContext</tt></p>\n<dl class="docutils">\n<dt><tt class="'
'docutils literal">mymodel</tt></dt>\n<dd>An instance of <a class="'
'reference external" href="/admindocs/models/myapp.mymodel/">'
'myapp.MyModel</a>.</dd>\n</dl>\n<p><strong>Template:</strong></p>'
'\n<p><a class="reference external" href="/admindocs/templates/'
'myapp/my_template.html/">myapp/my_template.html</a> (DESCRIPTION)'
'</p>\n'
)
self.assertHTMLEqual(description_output, description_rendered)
def test_initial_header_level(self):
header = 'should be h3...\n\nHeader\n------\n'
output = utils.parse_rst(header, 'header')
self.assertIn('<h3>Header</h3>', output)
import base64
import datetime
import sys
import time
import unittest
from unittest import mock
import xmlrpc.client as xmlrpclib
import xmlrpc.server
import http.client
import socket
import os
import re
import io
import contextlib
from test import support
try:
import threading
except ImportError:
threading = None
alist = [{'astring': 'foo@bar.baz',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary(b"my dog has fleas"),
'b64bytes': b"my dog has fleas",
'b64bytearray': bytearray(b"my dog has fleas"),
'boolean': False,
'unicode': '\u4000\u6000\u8000',
'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 2, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 2, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
dump = xmlrpclib.dumps((alist,))
load = xmlrpclib.loads(dump)
self.assertEqual(alist, load[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_builtin_types set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('20050210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_datetime=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 2, 10, 11, 41, 23)
self.assertEqual(dt, xmlrpclib.DateTime('00010210T11:41:23'))
s = xmlrpclib.dumps((dt,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), datetime.datetime)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newdt,) = result
self.assertEqual(newdt, dt)
self.assertIs(type(newdt), xmlrpclib.DateTime)
self.assertIsNone(m)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assertIsInstance(new_d.value, str)
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assertIsInstance(s, str)
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEqual(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxsize > 2**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps,
(xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int,
xmlrpclib.MININT-1, dummy_write)
def test_dump_double(self):
xmlrpclib.dumps((float(2 ** 34),))
xmlrpclib.dumps((float(xmlrpclib.MAXINT),
float(xmlrpclib.MININT)))
xmlrpclib.dumps((float(xmlrpclib.MAXINT + 42),
float(xmlrpclib.MININT - 42)))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_double(xmlrpclib.MAXINT, dummy_write)
m.dump_double(xmlrpclib.MININT, dummy_write)
m.dump_double(xmlrpclib.MAXINT + 42, dummy_write)
m.dump_double(xmlrpclib.MININT - 42, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEqual(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_dump_bytes(self):
sample = b"my dog has fleas"
self.assertEqual(sample, xmlrpclib.Binary(sample))
for type_ in bytes, bytearray, xmlrpclib.Binary:
value = type_(sample)
s = xmlrpclib.dumps((value,))
result, m = xmlrpclib.loads(s, use_builtin_types=True)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), bytes)
self.assertIsNone(m)
result, m = xmlrpclib.loads(s, use_builtin_types=False)
(newvalue,) = result
self.assertEqual(newvalue, sample)
self.assertIs(type(newvalue), xmlrpclib.Binary)
self.assertIsNone(m)
def test_get_host_info(self):
# see bug #3613, this raised a TypeError
transp = xmlrpc.client.Transport()
self.assertEqual(transp.get_host_info("user@host.tld"),
('host.tld',
[('Authorization', 'Basic dXNlcg==')], {}))
def test_ssl_presence(self):
try:
import ssl
except ImportError:
has_ssl = False
else:
has_ssl = True
try:
xmlrpc.client.ServerProxy('https://localhost:9999').bad_function()
except NotImplementedError:
self.assertFalse(has_ssl, "xmlrpc client's error with SSL support")
except socket.error:
self.assertTrue(has_ssl)
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEqual(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
def test_dotted_attribute(self):
# this will raise AttributeError because the code doesn't want us to use
# private methods
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
with mock.patch('time.localtime') as localtime_mock:
time_struct = time.struct_time(
[2013, 7, 15, 0, 24, 49, 0, 196, 0])
localtime_mock.return_value = time_struct
localtime = time.localtime()
t = xmlrpclib.DateTime()
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", localtime))
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t),
time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
self.assertEqual(t2, tref)
def test_comparison(self):
now = datetime.datetime.now()
dtime = xmlrpclib.DateTime(now.timetuple())
# datetime vs. DateTime
self.assertTrue(dtime == now)
self.assertTrue(now == dtime)
then = now + datetime.timedelta(seconds=4)
self.assertTrue(then >= dtime)
self.assertTrue(dtime < then)
# str vs. DateTime
dstr = now.strftime("%Y%m%dT%H:%M:%S")
self.assertTrue(dtime == dstr)
self.assertTrue(dstr == dtime)
dtime_then = xmlrpclib.DateTime(then.timetuple())
self.assertTrue(dtime_then >= dstr)
self.assertTrue(dstr < dtime_then)
# some other types
dbytes = dstr.encode('ascii')
dtuple = now.timetuple()
with self.assertRaises(TypeError):
dtime == 1970
with self.assertRaises(TypeError):
dtime != dbytes
with self.assertRaises(TypeError):
dtime == bytearray(dbytes)
with self.assertRaises(TypeError):
dtime != dtuple
with self.assertRaises(TypeError):
dtime < float(1970)
with self.assertRaises(TypeError):
dtime > dbytes
with self.assertRaises(TypeError):
dtime <= bytearray(dbytes)
with self.assertRaises(TypeError):
dtime >= dtuple
class BinaryTestCase(unittest.TestCase):
# XXX What should str(Binary(b"\xff")) return? I'm choosing "\xff"
# for now (i.e. interpreting the binary data as Latin-1-encoded
# text). But this feels very unsatisfactory. Perhaps we should
# only define repr(), and return r"Binary(b'\xff')" instead?
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = b'\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), str(d, "latin-1"))
def test_decode(self):
d = b'\x01\x02\x03abc123\xff\xfe'
de = base64.encodebytes(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), str(d, "latin-1"))
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), str(d, "latin-1"))
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shut down. The user must clear
# the event after it has been set the first time to catch the second set.
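# For illustration, the wait/clear/wait pattern described above (the actual
# code lives in BaseServerTestCase.setUp/tearDown below):
#
#   evt = threading.Event()
#   threading.Thread(target=http_server, args=(evt, 1)).start()
#   evt.wait(); evt.clear()   # first set: server is ready to serve
#   ...issue requests against URL...
#   evt.wait()                # second set: server has shut down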
def http_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(xmlrpc.server.SimpleXMLRPCServer):
def get_request(self):
# Ensure the socket is always blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
serv = MyXMLRPCServer(("localhost", 0), requestHandler,
logRequests=False, bind_and_activate=False)
try:
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x,y: x+y, 'add')
serv.register_function(my_function)
serv.register_instance(TestInstanceClass())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
def http_multi_server(evt, numrequests, requestHandler=None):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(xmlrpc.server.MultiPathXMLRPCServer):
def get_request(self):
# Ensure the socket is always blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
if not requestHandler:
requestHandler = xmlrpc.server.SimpleXMLRPCRequestHandler
class MyRequestHandler(requestHandler):
rpc_paths = []
class BrokenDispatcher:
def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
raise RuntimeError("broken dispatcher")
serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler,
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
try:
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
#connect to IP address directly. This avoids socket.create_connection()
#trying to connect to "localhost" using all address families, which
#causes slowdown e.g. on vista which supports AF_INET6. The server listens
#on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
paths = ["/foo", "/foo/bar"]
for path in paths:
d = serv.add_dispatcher(path, xmlrpc.server.SimpleXMLRPCDispatcher())
d.register_introspection_functions()
d.register_multicall_functions()
serv.get_dispatcher(paths[0]).register_function(pow)
serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add')
serv.add_dispatcher("/is/broken", BrokenDispatcher())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function is used to detect (and ignore) errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore socket.errors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
def make_request_and_skipIf(condition, reason):
# If we skip the test, we have to make a request because the
# server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
try:
xmlrpclib.ServerProxy(URL).my_function()
except (xmlrpclib.ProtocolError, socket.error) as e:
if not is_unavailable_exception(e):
raise
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
@unittest.skipUnless(threading, 'Threading required for this test.')
class BaseServerTestCase(unittest.TestCase):
requestHandler = None
request_count = 1
threadFunc = staticmethod(http_server)
def setUp(self):
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, self.request_count, self.requestHandler)
threading.Thread(target=self.threadFunc, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# disable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
class SimpleServerTestCase(BaseServerTestCase):
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_nonascii(self):
start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t'
end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n'
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.add(start_string, end_string),
start_string + end_string)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
# [ch] The test 404 is causing lots of false alarms.
def XXXtest_404(self):
# send POST with http.client, it should return 404 header and
# 'Not Found' message.
conn = http.client.HTTPConnection(ADDR, PORT)
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
conn.close()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
expected_methods = set(['pow', 'div', 'my_function', 'add',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall'])
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
@make_request_and_skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains:
# [{'faultCode': 1, 'faultString': '<class \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<class \'Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
xmlrpc.server.resolve_dotted_attribute, str, '__add')
self.assertTrue(xmlrpc.server.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
def test_unicode_host(self):
server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
self.assertEqual(server.add("a", "\xe9"), "a\xe9")
def test_partial_post(self):
# Check that a partial POST doesn't make the server loop: issue #14001.
conn = http.client.HTTPConnection(ADDR, PORT)
conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
conn.close()
class MultiPathServerTestCase(BaseServerTestCase):
threadFunc = staticmethod(http_multi_server)
request_count = 2
def test_path1(self):
p = xmlrpclib.ServerProxy(URL+"/foo")
self.assertEqual(p.pow(6,8), 6**8)
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
def test_path2(self):
p = xmlrpclib.ServerProxy(URL+"/foo/bar")
self.assertEqual(p.add(6,8), 6+8)
self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
def test_path3(self):
p = xmlrpclib.ServerProxy(URL+"/is/broken")
self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class BaseKeepaliveServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and logs requests into a
#class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
myRequests = []
def handle(self):
self.myRequests.append([])
self.reqidx = len(self.myRequests)-1
return self.parentClass.handle(self)
def handle_one_request(self):
result = self.parentClass.handle_one_request(self)
self.myRequests[self.reqidx].append(self.raw_requestline)
return result
requestHandler = RequestHandler
def setUp(self):
#clear request log
self.RequestHandler.myRequests = []
return BaseServerTestCase.setUp(self)
#A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism
#does indeed serve subsequent requests on the same connection
class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase):
def test_two(self):
p = xmlrpclib.ServerProxy(URL)
#do three requests.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
#they should have all been handled by a single request handler
self.assertEqual(len(self.RequestHandler.myRequests), 1)
#check that we did at least two (the third may be pending append
#due to thread scheduling)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
#test special attribute access on the serverproxy, through the __call__
#function.
class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase):
#ask for two keepalive requests to be handled.
request_count=2
def test_close(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")() #this should trigger a new keep-alive request
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
#there should have been two request handlers, each having logged at least
#two complete requests
self.assertEqual(len(self.RequestHandler.myRequests), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2)
self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2)
def test_transport(self):
p = xmlrpclib.ServerProxy(URL)
#do some requests with close.
self.assertEqual(p.pow(6,8), 6**8)
p("transport").close() #same as above, really.
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
self.assertEqual(len(self.RequestHandler.myRequests), 2)
#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
class GzipServerTestCase(BaseServerTestCase):
#a request handler that supports keep-alive and stores the content length
#of the last request in a class variable
class RequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
parentClass = xmlrpc.server.SimpleXMLRPCRequestHandler
protocol_version = 'HTTP/1.1'
def do_POST(self):
#store content of last request in class
self.__class__.content_length = int(self.headers["content-length"])
return self.parentClass.do_POST(self)
requestHandler = RequestHandler
class Transport(xmlrpclib.Transport):
#custom transport, stores the response length for our perusal
fake_gzip = False
def parse_response(self, response):
self.response_length=int(response.getheader("content-length", 0))
return xmlrpclib.Transport.parse_response(self, response)
def send_content(self, connection, body):
if self.fake_gzip:
#add a lone gzip header to induce decode error remotely
connection.putheader("Content-Encoding", "gzip")
return xmlrpclib.Transport.send_content(self, connection, body)
def setUp(self):
BaseServerTestCase.setUp(self)
def test_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
p = xmlrpclib.ServerProxy(URL, transport=t)
self.assertEqual(p.pow(6,8), 6**8)
a = self.RequestHandler.content_length
t.encode_threshold = 0 #turn on request encoding
self.assertEqual(p.pow(6,8), 6**8)
b = self.RequestHandler.content_length
self.assertTrue(a>b)
p("close")()
def test_bad_gzip_request(self):
t = self.Transport()
t.encode_threshold = None
t.fake_gzip = True
p = xmlrpclib.ServerProxy(URL, transport=t)
cm = self.assertRaisesRegex(xmlrpclib.ProtocolError,
re.compile(r"\b400\b"))
with cm:
p.pow(6, 8)
p("close")()
def test_gzip_response(self):
t = self.Transport()
p = xmlrpclib.ServerProxy(URL, transport=t)
old = self.requestHandler.encode_threshold
self.requestHandler.encode_threshold = None #no encoding
self.assertEqual(p.pow(6,8), 6**8)
a = t.response_length
self.requestHandler.encode_threshold = 0 #always encode
self.assertEqual(p.pow(6,8), 6**8)
p("close")()
b = t.response_length
self.requestHandler.encode_threshold = old
self.assertTrue(a>b)
#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
if threading:
self.url = URL
else:
# Without threading, http_server() and http_multi_server() will not
# be executed and URL is still equal to None. 'http://' is a just
# enough to choose the scheme (HTTP)
self.url = 'http://'
def test_close(self):
p = xmlrpclib.ServerProxy(self.url)
self.assertEqual(p('close')(), None)
def test_transport(self):
t = xmlrpclib.Transport()
p = xmlrpclib.ServerProxy(self.url, transport=t)
self.assertEqual(p('transport'), t)
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(http.client.HTTPMessage):
def get(self, key, failobj=None):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return super().get(key, failobj)
@unittest.skipUnless(threading, 'Threading required for this test.')
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
default_class = http.client.HTTPMessage
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = default_class
def test_basic(self):
# check that flag is false by default
flagval = xmlrpc.server.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
xmlrpc.server.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
xmlrpc.server.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error) as e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("X-exception"), expected_err)
self.assertTrue(e.headers.get("X-traceback") is not None)
else:
self.fail('ProtocolError not raised')
@contextlib.contextmanager
def captured_stdout(encoding='utf-8'):
"""A variation on support.captured_stdout() which gives a text stream
having a `buffer` attribute.
"""
import io
orig_stdout = sys.stdout
sys.stdout = io.TextIOWrapper(io.BytesIO(), encoding=encoding)
try:
yield sys.stdout
finally:
sys.stdout = orig_stdout
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = xmlrpc.server.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
with support.EnvironmentVarGuard() as env:
env['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
# get stdout output
with captured_stdout(encoding=self.cgi.encoding) as data_out:
self.cgi.handle_request()
# parse Status header
data_out.seek(0)
handle = data_out.read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
with support.EnvironmentVarGuard() as env, \
captured_stdout(encoding=self.cgi.encoding) as data_out, \
support.captured_stdin() as data_in:
data_in.write(data)
data_in.seek(0)
env['CONTENT_LENGTH'] = str(len(data))
self.cgi.handle_request()
data_out.seek(0)
            # the server will respond with a Fault; if so, our goal is achieved ;)
handle = data_out.read()
            # start at the 44th char so as not to include the HTTP header;
            # we only need the XML payload
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
            # Also test the content-length returned by handle_request
            # Using the same test method in order to avoid all the
            # data-passing boilerplate code.
# Test for bug: http://bugs.python.org/issue5040
content = handle[handle.find("<?xml"):]
self.assertEqual(
            int(re.search(r'Content-Length: (\d+)', handle).group(1)),
len(content))
class UseBuiltinTypesTestCase(unittest.TestCase):
def test_use_builtin_types(self):
# SimpleXMLRPCDispatcher.__init__ accepts use_builtin_types, which
# makes all dispatch of binary data as bytes instances, and all
# dispatch of datetime argument as datetime.datetime instances.
self.log = []
expected_bytes = b"my dog has fleas"
expected_date = datetime.datetime(2008, 5, 26, 18, 25, 12)
marshaled = xmlrpclib.dumps((expected_bytes, expected_date), 'foobar')
def foobar(*args):
self.log.extend(args)
handler = xmlrpc.server.SimpleXMLRPCDispatcher(
allow_none=True, encoding=None, use_builtin_types=True)
handler.register_function(foobar)
handler._marshaled_dispatch(marshaled)
self.assertEqual(len(self.log), 2)
mybytes, mydate = self.log
self.assertEqual(self.log, [expected_bytes, expected_date])
self.assertIs(type(mydate), datetime.datetime)
self.assertIs(type(mybytes), bytes)
def test_cgihandler_has_use_builtin_types_flag(self):
handler = xmlrpc.server.CGIXMLRPCRequestHandler(use_builtin_types=True)
self.assertTrue(handler.use_builtin_types)
def test_xmlrpcserver_has_use_builtin_types_flag(self):
server = xmlrpc.server.SimpleXMLRPCServer(("localhost", 0),
use_builtin_types=True)
server.server_close()
self.assertTrue(server.use_builtin_types)
@support.reap_threads
def test_main():
xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase]
xmlrpc_tests.append(UseBuiltinTypesTestCase)
xmlrpc_tests.append(SimpleServerTestCase)
xmlrpc_tests.append(KeepaliveServerTestCase1)
xmlrpc_tests.append(KeepaliveServerTestCase2)
try:
import gzip
xmlrpc_tests.append(GzipServerTestCase)
except ImportError:
pass #gzip not supported in this build
xmlrpc_tests.append(MultiPathServerTestCase)
xmlrpc_tests.append(ServerProxyTestCase)
xmlrpc_tests.append(FailingServerTestCase)
xmlrpc_tests.append(CGIHandlerTestCase)
support.run_unittest(*xmlrpc_tests)
if __name__ == "__main__":
test_main()
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import itertools
from oslo.config import cfg
from neutron.agent.linux import utils
from neutron.plugins.common import constants as qconstants
from neutron.services.loadbalancer import constants
PROTOCOL_MAP = {
constants.PROTOCOL_TCP: 'tcp',
constants.PROTOCOL_HTTP: 'http',
constants.PROTOCOL_HTTPS: 'tcp',
}
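# Note: HTTPS is deliberately mapped to HAProxy's 'tcp' mode; without TLS
# termination on the load balancer, HTTPS traffic is passed through to the
# members as opaque TCP, while plain HTTP can be proxied in 'http' mode.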
BALANCE_MAP = {
constants.LB_METHOD_ROUND_ROBIN: 'roundrobin',
constants.LB_METHOD_LEAST_CONNECTIONS: 'leastconn',
constants.LB_METHOD_SOURCE_IP: 'source'
}
STATS_MAP = {
constants.STATS_ACTIVE_CONNECTIONS: 'qcur',
constants.STATS_MAX_CONNECTIONS: 'qmax',
constants.STATS_CURRENT_SESSIONS: 'scur',
constants.STATS_MAX_SESSIONS: 'smax',
constants.STATS_TOTAL_SESSIONS: 'stot',
constants.STATS_IN_BYTES: 'bin',
constants.STATS_OUT_BYTES: 'bout',
constants.STATS_CONNECTION_ERRORS: 'econ',
constants.STATS_RESPONSE_ERRORS: 'eresp'
}
ACTIVE = qconstants.ACTIVE
INACTIVE = qconstants.INACTIVE
def save_config(conf_path, logical_config, socket_path=None):
"""Convert a logical configuration to the HAProxy version."""
data = []
data.extend(_build_global(logical_config, socket_path=socket_path))
data.extend(_build_defaults(logical_config))
data.extend(_build_frontend(logical_config))
data.extend(_build_backend(logical_config))
utils.replace_file(conf_path, '\n'.join(data))
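# Rough shape of the rendered configuration (illustrative sketch only; the
# real section names and values come from the logical_config passed in by
# the agent):
#
#   global
#       daemon
#       ...
#   defaults
#       log global
#       ...
#   frontend <vip-id>
#       bind <vip-address>:<protocol-port>
#       default_backend <pool-id>
#       ...
#   backend <pool-id>
#       balance roundrobin
#       server <member-id> <address>:<port> weight <weight> ...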
def _build_global(config, socket_path=None):
opts = [
'daemon',
'user nobody',
'group %s' % cfg.CONF.user_group,
'log /dev/log local0',
'log /dev/log local1 notice'
]
if socket_path:
opts.append('stats socket %s mode 0666 level user' % socket_path)
return itertools.chain(['global'], ('\t' + o for o in opts))
def _build_defaults(config):
opts = [
'log global',
'retries 3',
'option redispatch',
'timeout connect 5000',
'timeout client 50000',
'timeout server 50000',
]
return itertools.chain(['defaults'], ('\t' + o for o in opts))
def _build_frontend(config):
protocol = config['vip']['protocol']
opts = [
'option tcplog',
'bind %s:%d' % (
_get_first_ip_from_port(config['vip']['port']),
config['vip']['protocol_port']
),
'mode %s' % PROTOCOL_MAP[protocol],
'default_backend %s' % config['pool']['id'],
]
if config['vip']['connection_limit'] >= 0:
opts.append('maxconn %s' % config['vip']['connection_limit'])
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
return itertools.chain(
['frontend %s' % config['vip']['id']],
('\t' + o for o in opts)
)
def _build_backend(config):
protocol = config['pool']['protocol']
lb_method = config['pool']['lb_method']
opts = [
'mode %s' % PROTOCOL_MAP[protocol],
'balance %s' % BALANCE_MAP.get(lb_method, 'roundrobin')
]
if protocol == constants.PROTOCOL_HTTP:
opts.append('option forwardfor')
# add the first health_monitor (if available)
server_addon, health_opts = _get_server_health_option(config)
opts.extend(health_opts)
# add session persistence (if available)
persist_opts = _get_session_persistence(config)
opts.extend(persist_opts)
# add the members
for member in config['members']:
if member['status'] in (ACTIVE, INACTIVE) and member['admin_state_up']:
server = (('server %(id)s %(address)s:%(protocol_port)s '
'weight %(weight)s') % member) + server_addon
if _has_http_cookie_persistence(config):
server += ' cookie %d' % config['members'].index(member)
opts.append(server)
return itertools.chain(
['backend %s' % config['pool']['id']],
('\t' + o for o in opts)
)
def _get_first_ip_from_port(port):
for fixed_ip in port['fixed_ips']:
return fixed_ip['ip_address']
def _get_server_health_option(config):
"""return the first active health option."""
for monitor in config['healthmonitors']:
# not checking the status of healthmonitor for two reasons:
# 1) status field is absent in HealthMonitor model
# 2) only active HealthMonitors are fetched with
# LoadBalancerCallbacks.get_logical_device
if monitor['admin_state_up']:
break
else:
return '', []
server_addon = ' check inter %(delay)ds fall %(max_retries)d' % monitor
opts = [
'timeout check %ds' % monitor['timeout']
]
if monitor['type'] in (constants.HEALTH_MONITOR_HTTP,
constants.HEALTH_MONITOR_HTTPS):
opts.append('option httpchk %(http_method)s %(url_path)s' % monitor)
opts.append(
'http-check expect rstatus %s' %
'|'.join(_expand_expected_codes(monitor['expected_codes']))
)
if monitor['type'] == constants.HEALTH_MONITOR_HTTPS:
opts.append('option ssl-hello-chk')
return server_addon, opts
def _get_session_persistence(config):
persistence = config['vip'].get('session_persistence')
if not persistence:
return []
opts = []
if persistence['type'] == constants.SESSION_PERSISTENCE_SOURCE_IP:
opts.append('stick-table type ip size 10k')
opts.append('stick on src')
elif persistence['type'] == constants.SESSION_PERSISTENCE_HTTP_COOKIE:
opts.append('cookie SRV insert indirect nocache')
elif (persistence['type'] == constants.SESSION_PERSISTENCE_APP_COOKIE and
persistence.get('cookie_name')):
opts.append('appsession %s len 56 timeout 3h' %
persistence['cookie_name'])
return opts
def _has_http_cookie_persistence(config):
return (config['vip'].get('session_persistence') and
config['vip']['session_persistence']['type'] ==
constants.SESSION_PERSISTENCE_HTTP_COOKIE)
def _expand_expected_codes(codes):
"""Expand the expected code string in set of codes.
200-204 -> 200, 201, 202, 204
200, 203 -> 200, 203
"""
retval = set()
for code in codes.replace(',', ' ').split(' '):
code = code.strip()
if not code:
continue
elif '-' in code:
low, hi = code.split('-')[:2]
retval.update(str(i) for i in xrange(int(low), int(hi) + 1))
else:
retval.add(code)
return retval
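# Editor's sketch (hedged): a quick self-check of the code-expansion helper,
# guarded so it only runs when this module is executed directly (which still
# requires the neutron/oslo dependencies imported above to be installed).
if __name__ == '__main__':
    assert _expand_expected_codes('200-202,204') == set(
        ['200', '201', '202', '204'])
    assert _expand_expected_codes('200, 203') == set(['200', '203'])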
from __future__ import print_function, division
from collections import deque
from random import randint
from sympy.core.compatibility import range
from sympy.external import import_module
from sympy import Mul, Basic, Number, Pow, Integer
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.dagger import Dagger
__all__ = [
# Public interfaces
'generate_gate_rules',
'generate_equivalent_ids',
'GateIdentity',
'bfs_identity_search',
'random_identity_search',
# "Private" functions
'is_scalar_sparse_matrix',
'is_scalar_nonsparse_matrix',
'is_degenerate',
'is_reducible',
]
np = import_module('numpy')
scipy = import_module('scipy', __import__kwargs={'fromlist': ['sparse']})
def is_scalar_sparse_matrix(circuit, nqubits, identity_only, eps=1e-11):
"""Checks if a given scipy.sparse matrix is a scalar matrix.
A scalar matrix is such that B = bI, where B is the scalar
matrix, b is some scalar multiple, and I is the identity
matrix. A scalar matrix would have only the element b along
    its main diagonal and zeroes elsewhere.
Parameters
==========
circuit : Gate tuple
Sequence of quantum gates representing a quantum circuit
nqubits : int
Number of qubits in the circuit
identity_only : bool
Check for only identity matrices
eps : number
The tolerance value for zeroing out elements in the matrix.
Values in the range [-eps, +eps] will be changed to a zero.
"""
if not np or not scipy:
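        # numpy/scipy are required for the sparse representation used below;
        # the module-level alias further down only points at this function
        # when both are importable, so this guard is effectively a no-op.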
pass
matrix = represent(Mul(*circuit), nqubits=nqubits,
format='scipy.sparse')
# In some cases, represent returns a 1D scalar value in place
# of a multi-dimensional scalar matrix
if (isinstance(matrix, int)):
return matrix == 1 if identity_only else True
# If represent returns a matrix, check if the matrix is diagonal
# and if every item along the diagonal is the same
else:
        # Due to floating point operations, we must zero out
# elements that are "very" small in the dense matrix
# See parameter for default value.
# Get the ndarray version of the dense matrix
dense_matrix = matrix.todense().getA()
# Since complex values can't be compared, must split
# the matrix into real and imaginary components
# Find the real values in between -eps and eps
bool_real = np.logical_and(dense_matrix.real > -eps,
dense_matrix.real < eps)
# Find the imaginary values between -eps and eps
bool_imag = np.logical_and(dense_matrix.imag > -eps,
dense_matrix.imag < eps)
# Replaces values between -eps and eps with 0
corrected_real = np.where(bool_real, 0.0, dense_matrix.real)
corrected_imag = np.where(bool_imag, 0.0, dense_matrix.imag)
        # Reattach the imaginary unit to the zeroed imaginary component
        corrected_imag = corrected_imag * 1j
# Recombine the real and imaginary components
corrected_dense = corrected_real + corrected_imag
# Check if it's diagonal
row_indices = corrected_dense.nonzero()[0]
col_indices = corrected_dense.nonzero()[1]
        # Check if the row indices and column indices are the same
        # If they match, the matrix only contains elements along the diagonal
bool_indices = row_indices == col_indices
is_diagonal = bool_indices.all()
first_element = corrected_dense[0][0]
        # If the first element is zero, the matrix can't be rescaled by it,
        # so it can't be a nonzero scalar matrix
if (first_element == 0.0 + 0.0j):
return False
        # After rescaling by the first element, the trace should be
        # 2^nqubits if there are elements all along the main diagonal
trace_of_corrected = (corrected_dense/first_element).trace()
expected_trace = pow(2, nqubits)
has_correct_trace = trace_of_corrected == expected_trace
# If only looking for identity matrices
# first element must be a 1
real_is_one = abs(first_element.real - 1.0) < eps
imag_is_zero = abs(first_element.imag) < eps
is_one = real_is_one and imag_is_zero
is_identity = is_one if identity_only else True
return bool(is_diagonal and has_correct_trace and is_identity)
def is_scalar_nonsparse_matrix(circuit, nqubits, identity_only):
"""Checks if a given circuit, in matrix form, is equivalent to
a scalar value.
Parameters
==========
circuit : Gate tuple
Sequence of quantum gates representing a quantum circuit
nqubits : int
Number of qubits in the circuit
identity_only : bool
Check for only identity matrices
Note: Used in situations when is_scalar_sparse_matrix has bugs
"""
matrix = represent(Mul(*circuit), nqubits=nqubits)
# In some cases, represent returns a 1D scalar value in place
# of a multi-dimensional scalar matrix
if (isinstance(matrix, Number)):
return matrix == 1 if identity_only else True
# If represent returns a matrix, check if the matrix is diagonal
# and if every item along the diagonal is the same
else:
        # Add up the diagonal elements
matrix_trace = matrix.trace()
# Divide the trace by the first element in the matrix
# if matrix is not required to be the identity matrix
adjusted_matrix_trace = (matrix_trace/matrix[0]
if not identity_only
else matrix_trace)
is_identity = matrix[0] == 1.0 if identity_only else True
has_correct_trace = adjusted_matrix_trace == pow(2, nqubits)
# The matrix is scalar if it's diagonal and the adjusted trace
# value is equal to 2^nqubits
return bool(
matrix.is_diagonal() and has_correct_trace and is_identity)
if np and scipy:
is_scalar_matrix = is_scalar_sparse_matrix
else:
is_scalar_matrix = is_scalar_nonsparse_matrix
def _get_min_qubits(a_gate):
if isinstance(a_gate, Pow):
return a_gate.base.min_qubits
else:
return a_gate.min_qubits
def ll_op(left, right):
"""Perform a LL operation.
A LL operation multiplies both left and right circuits
with the dagger of the left circuit's leftmost gate, and
the dagger is multiplied on the left side of both circuits.
If a LL is possible, it returns the new gate rule as a
    2-tuple (LHS, RHS), where LHS is the left circuit and
    RHS is the right circuit of the new rule.
If a LL is not possible, None is returned.
Parameters
==========
left : Gate tuple
The left circuit of a gate rule expression.
right : Gate tuple
The right circuit of a gate rule expression.
Examples
========
Generate a new gate rule using a LL operation:
>>> from sympy.physics.quantum.identitysearch import ll_op
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> ll_op((x, y, z), ())
((Y(0), Z(0)), (X(0),))
>>> ll_op((y, z), (x,))
((Z(0),), (Y(0), X(0)))
"""
if (len(left) > 0):
ll_gate = left[0]
ll_gate_is_unitary = is_scalar_matrix(
(Dagger(ll_gate), ll_gate), _get_min_qubits(ll_gate), True)
if (len(left) > 0 and ll_gate_is_unitary):
# Get the new left side w/o the leftmost gate
new_left = left[1:len(left)]
# Add the leftmost gate to the left position on the right side
new_right = (Dagger(ll_gate),) + right
# Return the new gate rule
return (new_left, new_right)
return None
def lr_op(left, right):
"""Perform a LR operation.
A LR operation multiplies both left and right circuits
with the dagger of the left circuit's rightmost gate, and
the dagger is multiplied on the right side of both circuits.
If a LR is possible, it returns the new gate rule as a
    2-tuple (LHS, RHS), where LHS is the left circuit and
    RHS is the right circuit of the new rule.
If a LR is not possible, None is returned.
Parameters
==========
left : Gate tuple
The left circuit of a gate rule expression.
right : Gate tuple
The right circuit of a gate rule expression.
Examples
========
Generate a new gate rule using a LR operation:
>>> from sympy.physics.quantum.identitysearch import lr_op
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> lr_op((x, y, z), ())
((X(0), Y(0)), (Z(0),))
>>> lr_op((x, y), (z,))
((X(0),), (Z(0), Y(0)))
"""
if (len(left) > 0):
lr_gate = left[len(left) - 1]
lr_gate_is_unitary = is_scalar_matrix(
(Dagger(lr_gate), lr_gate), _get_min_qubits(lr_gate), True)
if (len(left) > 0 and lr_gate_is_unitary):
# Get the new left side w/o the rightmost gate
new_left = left[0:len(left) - 1]
# Add the rightmost gate to the right position on the right side
new_right = right + (Dagger(lr_gate),)
# Return the new gate rule
return (new_left, new_right)
return None
def rl_op(left, right):
"""Perform a RL operation.
A RL operation multiplies both left and right circuits
with the dagger of the right circuit's leftmost gate, and
the dagger is multiplied on the left side of both circuits.
If a RL is possible, it returns the new gate rule as a
    2-tuple (LHS, RHS), where LHS is the left circuit and
    RHS is the right circuit of the new rule.
If a RL is not possible, None is returned.
Parameters
==========
left : Gate tuple
The left circuit of a gate rule expression.
right : Gate tuple
The right circuit of a gate rule expression.
Examples
========
Generate a new gate rule using a RL operation:
>>> from sympy.physics.quantum.identitysearch import rl_op
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> rl_op((x,), (y, z))
((Y(0), X(0)), (Z(0),))
>>> rl_op((x, y), (z,))
((Z(0), X(0), Y(0)), ())
"""
if (len(right) > 0):
rl_gate = right[0]
rl_gate_is_unitary = is_scalar_matrix(
(Dagger(rl_gate), rl_gate), _get_min_qubits(rl_gate), True)
if (len(right) > 0 and rl_gate_is_unitary):
# Get the new right side w/o the leftmost gate
new_right = right[1:len(right)]
# Add the leftmost gate to the left position on the left side
new_left = (Dagger(rl_gate),) + left
# Return the new gate rule
return (new_left, new_right)
return None
def rr_op(left, right):
"""Perform a RR operation.
A RR operation multiplies both left and right circuits
with the dagger of the right circuit's rightmost gate, and
the dagger is multiplied on the right side of both circuits.
If a RR is possible, it returns the new gate rule as a
    2-tuple (LHS, RHS), where LHS is the left circuit and
    RHS is the right circuit of the new rule.
If a RR is not possible, None is returned.
Parameters
==========
left : Gate tuple
The left circuit of a gate rule expression.
right : Gate tuple
The right circuit of a gate rule expression.
Examples
========
Generate a new gate rule using a RR operation:
>>> from sympy.physics.quantum.identitysearch import rr_op
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> rr_op((x, y), (z,))
((X(0), Y(0), Z(0)), ())
>>> rr_op((x,), (y, z))
((X(0), Z(0)), (Y(0),))
"""
if (len(right) > 0):
rr_gate = right[len(right) - 1]
rr_gate_is_unitary = is_scalar_matrix(
(Dagger(rr_gate), rr_gate), _get_min_qubits(rr_gate), True)
if (len(right) > 0 and rr_gate_is_unitary):
# Get the new right side w/o the rightmost gate
new_right = right[0:len(right) - 1]
# Add the rightmost gate to the right position on the right side
new_left = left + (Dagger(rr_gate),)
# Return the new gate rule
return (new_left, new_right)
return None
def generate_gate_rules(gate_seq, return_as_muls=False):
"""Returns a set of gate rules. Each gate rules is represented
as a 2-tuple of tuples or Muls. An empty tuple represents an arbitrary
scalar value.
This function uses the four operations (LL, LR, RL, RR)
to generate the gate rules.
A gate rule is an expression such as ABC = D or AB = CD, where
A, B, C, and D are gates. Each value on either side of the
equal sign represents a circuit. The four operations allow
one to find a set of equivalent circuits from a gate identity.
The letters denoting the operation tell the user what
activities to perform on each expression. The first letter
indicates which side of the equal sign to focus on. The
second letter indicates which gate to focus on given the
side. Once this information is determined, the inverse
of the gate is multiplied on both circuits to create a new
gate rule.
For example, given the identity, ABCD = 1, a LL operation
    means look at the left circuit and left-multiply both sides by the
inverse of the leftmost gate A. If A is Hermitian, the inverse
of A is still A. The resulting new rule is BCD = A.
The following is a summary of the four operations. Assume
that in the examples, all gates are Hermitian.
LL : left circuit, left multiply
ABCD = E -> AABCD = AE -> BCD = AE
LR : left circuit, right multiply
ABCD = E -> ABCDD = ED -> ABC = ED
RL : right circuit, left multiply
ABC = ED -> EABC = EED -> EABC = D
RR : right circuit, right multiply
AB = CD -> ABD = CDD -> ABD = C
The number of gate rules generated is n*(n+1), where n
is the number of gates in the sequence (unproven).
Parameters
==========
gate_seq : Gate tuple, Mul, or Number
A variable length tuple or Mul of Gates whose product is equal to
a scalar matrix
return_as_muls : bool
True to return a set of Muls; False to return a set of tuples
Examples
========
Find the gate rules of the current circuit using tuples:
>>> from sympy.physics.quantum.identitysearch import generate_gate_rules
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> generate_gate_rules((x, x))
set([((X(0),), (X(0),)), ((X(0), X(0)), ())])
>>> generate_gate_rules((x, y, z))
set([((), (X(0), Z(0), Y(0))), ((), (Y(0), X(0), Z(0))),
((), (Z(0), Y(0), X(0))), ((X(0),), (Z(0), Y(0))),
((Y(0),), (X(0), Z(0))), ((Z(0),), (Y(0), X(0))),
((X(0), Y(0)), (Z(0),)), ((Y(0), Z(0)), (X(0),)),
((Z(0), X(0)), (Y(0),)), ((X(0), Y(0), Z(0)), ()),
((Y(0), Z(0), X(0)), ()), ((Z(0), X(0), Y(0)), ())])
Find the gate rules of the current circuit using Muls:
>>> generate_gate_rules(x*x, return_as_muls=True)
set([(1, 1)])
>>> generate_gate_rules(x*y*z, return_as_muls=True)
set([(1, X(0)*Z(0)*Y(0)), (1, Y(0)*X(0)*Z(0)),
(1, Z(0)*Y(0)*X(0)), (X(0)*Y(0), Z(0)),
(Y(0)*Z(0), X(0)), (Z(0)*X(0), Y(0)),
(X(0)*Y(0)*Z(0), 1), (Y(0)*Z(0)*X(0), 1),
(Z(0)*X(0)*Y(0), 1), (X(0), Z(0)*Y(0)),
(Y(0), X(0)*Z(0)), (Z(0), Y(0)*X(0))])
"""
if isinstance(gate_seq, Number):
if return_as_muls:
return {(Integer(1), Integer(1))}
else:
return {((), ())}
elif isinstance(gate_seq, Mul):
gate_seq = gate_seq.args
# Each item in queue is a 3-tuple:
# i) first item is the left side of an equality
# ii) second item is the right side of an equality
# iii) third item is the number of operations performed
# The argument, gate_seq, will start on the left side, and
# the right side will be empty, implying the presence of an
# identity.
queue = deque()
# A set of gate rules
rules = set()
# Maximum number of operations to perform
max_ops = len(gate_seq)
def process_new_rule(new_rule, ops):
if new_rule is not None:
new_left, new_right = new_rule
if new_rule not in rules and (new_right, new_left) not in rules:
rules.add(new_rule)
# If haven't reached the max limit on operations
if ops + 1 < max_ops:
queue.append(new_rule + (ops + 1,))
queue.append((gate_seq, (), 0))
rules.add((gate_seq, ()))
while len(queue) > 0:
left, right, ops = queue.popleft()
# Do a LL
new_rule = ll_op(left, right)
process_new_rule(new_rule, ops)
# Do a LR
new_rule = lr_op(left, right)
process_new_rule(new_rule, ops)
# Do a RL
new_rule = rl_op(left, right)
process_new_rule(new_rule, ops)
# Do a RR
new_rule = rr_op(left, right)
process_new_rule(new_rule, ops)
if return_as_muls:
        # Convert each rule from tuple form into Mul form
mul_rules = set()
for rule in rules:
left, right = rule
mul_rules.add((Mul(*left), Mul(*right)))
rules = mul_rules
return rules
def generate_equivalent_ids(gate_seq, return_as_muls=False):
"""Returns a set of equivalent gate identities.
A gate identity is a quantum circuit such that the product
of the gates in the circuit is equal to a scalar value.
For example, XYZ = i, where X, Y, Z are the Pauli gates and
i is the imaginary value, is considered a gate identity.
This function uses the four operations (LL, LR, RL, RR)
to generate the gate rules and, subsequently, to locate equivalent
gate identities.
Note that all equivalent identities are reachable in n operations
from the starting gate identity, where n is the number of gates
in the sequence.
The max number of gate identities is 2n, where n is the number
of gates in the sequence (unproven).
Parameters
==========
gate_seq : Gate tuple, Mul, or Number
A variable length tuple or Mul of Gates whose product is equal to
a scalar matrix.
return_as_muls: bool
True to return as Muls; False to return as tuples
Examples
========
Find equivalent gate identities from the current circuit with tuples:
>>> from sympy.physics.quantum.identitysearch import generate_equivalent_ids
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> generate_equivalent_ids((x, x))
set([(X(0), X(0))])
>>> generate_equivalent_ids((x, y, z))
set([(X(0), Y(0), Z(0)), (X(0), Z(0), Y(0)), (Y(0), X(0), Z(0)),
(Y(0), Z(0), X(0)), (Z(0), X(0), Y(0)), (Z(0), Y(0), X(0))])
Find equivalent gate identities from the current circuit with Muls:
>>> generate_equivalent_ids(x*x, return_as_muls=True)
set([1])
>>> generate_equivalent_ids(x*y*z, return_as_muls=True)
set([X(0)*Y(0)*Z(0), X(0)*Z(0)*Y(0), Y(0)*X(0)*Z(0),
Y(0)*Z(0)*X(0), Z(0)*X(0)*Y(0), Z(0)*Y(0)*X(0)])
"""
if isinstance(gate_seq, Number):
return {Integer(1)}
elif isinstance(gate_seq, Mul):
gate_seq = gate_seq.args
# Filter through the gate rules and keep the rules
# with an empty tuple either on the left or right side
# A set of equivalent gate identities
eq_ids = set()
gate_rules = generate_gate_rules(gate_seq)
for rule in gate_rules:
l, r = rule
if l == ():
eq_ids.add(r)
elif r == ():
eq_ids.add(l)
if return_as_muls:
convert_to_mul = lambda id_seq: Mul(*id_seq)
eq_ids = set(map(convert_to_mul, eq_ids))
return eq_ids
class GateIdentity(Basic):
"""Wrapper class for circuits that reduce to a scalar value.
A gate identity is a quantum circuit such that the product
of the gates in the circuit is equal to a scalar value.
For example, XYZ = i, where X, Y, Z are the Pauli gates and
i is the imaginary value, is considered a gate identity.
Parameters
==========
args : Gate tuple
A variable length tuple of Gates that form an identity.
Examples
========
Create a GateIdentity and look at its attributes:
>>> from sympy.physics.quantum.identitysearch import GateIdentity
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> an_identity = GateIdentity(x, y, z)
>>> an_identity.circuit
X(0)*Y(0)*Z(0)
>>> an_identity.equivalent_ids
set([(X(0), Y(0), Z(0)), (X(0), Z(0), Y(0)), (Y(0), X(0), Z(0)),
(Y(0), Z(0), X(0)), (Z(0), X(0), Y(0)), (Z(0), Y(0), X(0))])
"""
def __new__(cls, *args):
# args should be a tuple - a variable length argument list
obj = Basic.__new__(cls, *args)
obj._circuit = Mul(*args)
obj._rules = generate_gate_rules(args)
obj._eq_ids = generate_equivalent_ids(args)
return obj
@property
def circuit(self):
return self._circuit
@property
def gate_rules(self):
return self._rules
@property
def equivalent_ids(self):
return self._eq_ids
@property
def sequence(self):
return self.args
def __str__(self):
"""Returns the string of gates in a tuple."""
return str(self.circuit)
def is_degenerate(identity_set, gate_identity):
"""Checks if a gate identity is a permutation of another identity.
Parameters
==========
identity_set : set
A Python set with GateIdentity objects.
gate_identity : GateIdentity
The GateIdentity to check for existence in the set.
Examples
========
Check if the identity is a permutation of another identity:
>>> from sympy.physics.quantum.identitysearch import (
... GateIdentity, is_degenerate)
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> an_identity = GateIdentity(x, y, z)
>>> id_set = set([an_identity])
>>> another_id = (y, z, x)
>>> is_degenerate(id_set, another_id)
True
>>> another_id = (x, x)
>>> is_degenerate(id_set, another_id)
False
"""
# For now, just iteratively go through the set and check if the current
# gate_identity is a permutation of an identity in the set
for an_id in identity_set:
if (gate_identity in an_id.equivalent_ids):
return True
return False
def is_reducible(circuit, nqubits, begin, end):
"""Determines if a circuit is reducible by checking
if its subcircuits are scalar values.
Parameters
==========
circuit : Gate tuple
A tuple of Gates representing a circuit. The circuit to check
if a gate identity is contained in a subcircuit.
nqubits : int
The number of qubits the circuit operates on.
begin : int
The leftmost gate in the circuit to include in a subcircuit.
end : int
The rightmost gate in the circuit to include in a subcircuit.
Examples
========
Check if the circuit can be reduced:
>>> from sympy.physics.quantum.identitysearch import (
... GateIdentity, is_reducible)
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> is_reducible((x, y, z), 1, 0, 3)
True
Check if an interval in the circuit can be reduced:
>>> is_reducible((x, y, z), 1, 1, 3)
False
>>> is_reducible((x, y, y), 1, 1, 3)
True
"""
current_circuit = ()
    # Walk from the gate at index end - 1 back down to the gate at begin
for ndx in reversed(range(begin, end)):
next_gate = circuit[ndx]
current_circuit = (next_gate,) + current_circuit
# If a circuit as a matrix is equivalent to a scalar value
if (is_scalar_matrix(current_circuit, nqubits, False)):
return True
return False
def bfs_identity_search(gate_list, nqubits, max_depth=None,
identity_only=False):
"""Constructs a set of gate identities from the list of possible gates.
Performs a breadth first search over the space of gate identities.
This allows the finding of the shortest gate identities first.
Parameters
==========
gate_list : list, Gate
A list of Gates from which to search for gate identities.
nqubits : int
The number of qubits the quantum circuit operates on.
max_depth : int
The longest quantum circuit to construct from gate_list.
identity_only : bool
True to search for gate identities that reduce to identity;
False to search for gate identities that reduce to a scalar.
Examples
========
Find a list of gate identities:
>>> from sympy.physics.quantum.identitysearch import bfs_identity_search
>>> from sympy.physics.quantum.gate import X, Y, Z, H
>>> x = X(0); y = Y(0); z = Z(0)
>>> bfs_identity_search([x], 1, max_depth=2)
set([GateIdentity(X(0), X(0))])
>>> bfs_identity_search([x, y, z], 1)
set([GateIdentity(X(0), X(0)), GateIdentity(Y(0), Y(0)),
GateIdentity(Z(0), Z(0)), GateIdentity(X(0), Y(0), Z(0))])
Find a list of identities that only equal to 1:
>>> bfs_identity_search([x, y, z], 1, identity_only=True)
set([GateIdentity(X(0), X(0)), GateIdentity(Y(0), Y(0)),
GateIdentity(Z(0), Z(0))])
"""
if max_depth is None or max_depth <= 0:
max_depth = len(gate_list)
id_only = identity_only
# Start with an empty sequence (implicitly contains an IdentityGate)
queue = deque([()])
# Create an empty set of gate identities
ids = set()
# Begin searching for gate identities in given space.
while (len(queue) > 0):
current_circuit = queue.popleft()
for next_gate in gate_list:
new_circuit = current_circuit + (next_gate,)
# Determines if a (strict) subcircuit is a scalar matrix
circuit_reducible = is_reducible(new_circuit, nqubits,
1, len(new_circuit))
# In many cases when the matrix is a scalar value,
# the evaluated matrix will actually be an integer
if (is_scalar_matrix(new_circuit, nqubits, id_only) and
not is_degenerate(ids, new_circuit) and
not circuit_reducible):
ids.add(GateIdentity(*new_circuit))
elif (len(new_circuit) < max_depth and
not circuit_reducible):
queue.append(new_circuit)
return ids
def random_identity_search(gate_list, numgates, nqubits):
"""Randomly selects numgates from gate_list and checks if it is
a gate identity.
If the circuit is a gate identity, the circuit is returned;
Otherwise, None is returned.
"""
gate_size = len(gate_list)
circuit = ()
for i in range(numgates):
next_gate = gate_list[randint(0, gate_size - 1)]
circuit = circuit + (next_gate,)
is_scalar = is_scalar_matrix(circuit, nqubits, False)
return circuit if is_scalar else None
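# Editor's sketch (hedged): a small demonstration of the search helpers above,
# guarded so it only runs when this module is executed directly.
if __name__ == '__main__':
    from sympy.physics.quantum.gate import X, Y, Z
    x, y, z = X(0), Y(0), Z(0)
    # BFS finds the short single-qubit Pauli identities (XX, YY, ZZ, XYZ).
    print(bfs_identity_search([x, y, z], 1, max_depth=3))
    # A random probe of length 2 may or may not hit an identity.
    print(random_identity_search([x, y, z], 2, 1))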
#!/usr/bin/env python
import sys
import os
import inspect
import traceback
import yaml
import pycurl
import json
import csv
import logging
import threading
from optparse import OptionParser
from email import message_from_string # For headers handling
import time
try:
from cStringIO import StringIO as MyIO
except:
try:
from StringIO import StringIO as MyIO
except ImportError:
from io import BytesIO as MyIO
ESCAPE_DECODING = 'string-escape'
# Python 3 compatibility
if sys.version_info[0] > 2:
from past.builtins import basestring
from builtins import range as xrange
ESCAPE_DECODING = 'unicode_escape'
# Dirty hack to allow for running this as a script :-/
if __name__ == '__main__':
sys.path.append(os.path.dirname(os.path.dirname(
os.path.realpath(__file__))))
from pyresttest.six import text_type
from pyresttest.binding import Context
from pyresttest import generators
from pyresttest import validators
from pyresttest import tests
from pyresttest.generators import parse_generator
from pyresttest.parsing import flatten_dictionaries, lowercase_keys, safe_to_bool, safe_to_json
from pyresttest.validators import Failure
from pyresttest.tests import Test, DEFAULT_TIMEOUT
from pyresttest.benchmarks import Benchmark, AGGREGATES, METRICS, parse_benchmark
else: # Normal imports
from . import six
from .six import text_type
# Pyresttest internals
from . import binding
from .binding import Context
from . import generators
from .generators import parse_generator
from . import parsing
from .parsing import flatten_dictionaries, lowercase_keys, safe_to_bool, safe_to_json
from . import validators
from .validators import Failure
from . import tests
from .tests import Test, DEFAULT_TIMEOUT
from . import benchmarks
from .benchmarks import Benchmark, AGGREGATES, METRICS, parse_benchmark
"""
Executable class, ties everything together into the framework.
Module responsibilities:
- Read & import test files
- Parse test configs
- Provide executor methods for sets of tests and benchmarks
- Collect and report on test/benchmark results
- Perform analysis on benchmark results
"""
HEADER_ENCODING = 'ISO-8859-1'  # Per RFC 2616
LOGGING_LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
logging.basicConfig(format='%(levelname)s:%(message)s')
logger = logging.getLogger('pyresttest')
DIR_LOCK = threading.RLock() # Guards operations changing the working directory
class cd:
"""Context manager for changing the current working directory"""
# http://stackoverflow.com/questions/431684/how-do-i-cd-in-python/13197763#13197763
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
if self.newPath: # Don't CD to nothingness
DIR_LOCK.acquire()
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
if self.newPath: # Don't CD to nothingness
os.chdir(self.savedPath)
DIR_LOCK.release()
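# Usage sketch (editor's note, not in the original source): cd(None) or cd('')
# is a no-op; otherwise the directory change is serialized via DIR_LOCK, e.g.
#
#   with cd('/tmp/testsuite'):
#       teststruct = read_test_file('tests.yaml')  # resolved under /tmp/testsuite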
class TestConfig:
""" Configuration for a test run """
timeout = DEFAULT_TIMEOUT # timeout of tests, in seconds
print_bodies = False # Print response bodies in all cases
    print_headers = False  # Print response headers in all cases
retries = 0 # Retries on failures
test_parallel = False # Allow parallel execution of tests in a test set, for speed?
interactive = False
verbose = False
ssl_insecure = False
skip_term_colors = False # Turn off output term colors
# Binding and creation of generators
variable_binds = None
generators = None # Map of generator name to generator function
def __str__(self):
return json.dumps(self, default=safe_to_json)
class TestSet:
""" Encapsulates a set of tests and test configuration for them """
tests = list()
benchmarks = list()
config = TestConfig()
def __init__(self):
self.config = TestConfig()
self.tests = list()
self.benchmarks = list()
def __str__(self):
return json.dumps(self, default=safe_to_json)
class BenchmarkResult:
""" Stores results from a benchmark for reporting use """
group = None
name = u'unnamed'
results = dict() # Benchmark output, map the metric to the result array for that metric
aggregates = list() # List of aggregates, as tuples of (metricname, aggregate, result)
failures = 0 # Track call count that failed
def __init__(self):
self.aggregates = list()
        self.results = dict()
def __str__(self):
return json.dumps(self, default=safe_to_json)
class TestResponse:
""" Encapsulates everything about a test response """
test = None # Test run
response_code = None
body = None # Response body, if tracked
passed = False
response_headers = None
failures = None
def __init__(self):
self.failures = list()
def __str__(self):
return json.dumps(self, default=safe_to_json)
def read_test_file(path):
""" Read test file at 'path' in YAML """
# TODO allow use of safe_load_all to handle multiple test sets in a given
# doc
teststruct = yaml.safe_load(read_file(path))
return teststruct
def parse_headers(header_string):
""" Parse a header-string into individual headers
Implementation based on: http://stackoverflow.com/a/5955949/95122
Note that headers are a list of (key, value) since duplicate headers are allowed
NEW NOTE: keys & values are unicode strings, but can only contain ISO-8859-1 characters
"""
# First line is request line, strip it out
if not header_string:
return list()
request, headers = header_string.split('\r\n', 1)
if not headers:
return list()
# Python 2.6 message header parsing fails for Unicode strings, 2.7 is fine. Go figure.
if sys.version_info < (2,7):
header_msg = message_from_string(headers.encode(HEADER_ENCODING))
return [(text_type(k.lower(), HEADER_ENCODING), text_type(v, HEADER_ENCODING))
for k, v in header_msg.items()]
else:
header_msg = message_from_string(headers)
# Note: HTTP headers are *case-insensitive* per RFC 2616
return [(k.lower(), v) for k, v in header_msg.items()]
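# Illustration (editor's note, assumed input): for the raw blob
#   'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\nSet-Cookie: a=1\r\n\r\n'
# parse_headers returns [('content-type', 'text/html'), ('set-cookie', 'a=1')]:
# keys are lowercased and duplicate headers stay as separate (key, value) tuples.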
def parse_testsets(base_url, test_structure, test_files=set(), working_directory=None, vars=None):
""" Convert a Python data structure read from validated YAML to a set of structured testsets
The data structure is assumed to be a list of dictionaries, each of which describes:
    - a test (test structure)
- a simple test (just a URL, and a minimal test is created)
- or overall test configuration for this testset
- an import (load another set of tests into this one, from a separate file)
- For imports, these are recursive, and will use the parent config if none is present
Note: test_files is used to track tests that import other tests, to avoid recursive loops
This returns a list of testsets, corresponding to imported testsets and in-line multi-document sets
"""
tests_out = list()
test_config = TestConfig()
testsets = list()
benchmarks = list()
if working_directory is None:
working_directory = os.path.abspath(os.getcwd())
if vars and isinstance(vars, dict):
test_config.variable_binds = vars
# returns a testconfig and collection of tests
for node in test_structure: # Iterate through lists of test and configuration elements
if isinstance(node, dict): # Each config element is a miniature key-value dictionary
node = lowercase_keys(node)
for key in node:
if key == u'import':
importfile = node[key] # import another file
if importfile not in test_files:
logger.debug("Importing test sets: " + importfile)
test_files.add(importfile)
import_test_structure = read_test_file(importfile)
with cd(os.path.dirname(os.path.realpath(importfile))):
import_testsets = parse_testsets(
base_url, import_test_structure, test_files, vars=vars)
testsets.extend(import_testsets)
elif key == u'url': # Simple test, just a GET to a URL
mytest = Test()
val = node[key]
assert isinstance(val, basestring)
mytest.url = base_url + val
tests_out.append(mytest)
elif key == u'test': # Complex test with additional parameters
with cd(working_directory):
child = node[key]
mytest = Test.parse_test(base_url, child)
tests_out.append(mytest)
elif key == u'benchmark':
benchmark = parse_benchmark(base_url, node[key])
benchmarks.append(benchmark)
elif key == u'config' or key == u'configuration':
test_config = parse_configuration(
node[key], base_config=test_config)
testset = TestSet()
testset.tests = tests_out
testset.config = test_config
testset.benchmarks = benchmarks
testsets.append(testset)
return testsets
def parse_configuration(node, base_config=None):
""" Parse input config to configuration information """
test_config = base_config
if not test_config:
test_config = TestConfig()
node = lowercase_keys(flatten_dictionaries(node)) # Make it usable
for key, value in node.items():
if key == u'timeout':
test_config.timeout = int(value)
elif key == u'print_bodies':
test_config.print_bodies = safe_to_bool(value)
elif key == u'retries':
test_config.retries = int(value)
elif key == u'variable_binds':
if not test_config.variable_binds:
test_config.variable_binds = dict()
test_config.variable_binds.update(flatten_dictionaries(value))
elif key == u'generators':
flat = flatten_dictionaries(value)
gen_map = dict()
for generator_name, generator_config in flat.items():
gen = parse_generator(generator_config)
gen_map[str(generator_name)] = gen
test_config.generators = gen_map
return test_config
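# Example of the YAML shape this parser accepts (editor's sketch; only the key
# names handled above are meaningful, and the generator type shown is illustrative):
#
#   - config:
#       - timeout: 30
#       - retries: 2
#       - variable_binds: {user_id: 42}
#       - generators:
#           - ids: {type: 'number_sequence', start: 10}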
def read_file(path):
    """ Read a file into a string, doing necessary conversions around relative path handling """
    with open(path, "r") as f:
        string = f.read()
    return string
def run_test(mytest, test_config=TestConfig(), context=None, curl_handle=None, *args, **kwargs):
""" Put together test pieces: configure & run actual test, return results """
# Initialize a context if not supplied
my_context = context
if my_context is None:
my_context = Context()
mytest.update_context_before(my_context)
templated_test = mytest.realize(my_context)
curl = templated_test.configure_curl(
timeout=test_config.timeout, context=my_context, curl_handle=curl_handle)
result = TestResponse()
result.test = templated_test
# reset the body, it holds values from previous runs otherwise
headers = MyIO()
body = MyIO()
curl.setopt(pycurl.WRITEFUNCTION, body.write)
curl.setopt(pycurl.HEADERFUNCTION, headers.write)
if test_config.verbose:
curl.setopt(pycurl.VERBOSE, True)
if test_config.ssl_insecure:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
result.passed = None
if test_config.interactive:
print("===================================")
print("%s" % mytest.name)
print("-----------------------------------")
print("REQUEST:")
print("%s %s" % (templated_test.method, templated_test.url))
print("HEADERS:")
print("%s" % (templated_test.headers))
if mytest.body is not None:
print("\n%s" % templated_test.body)
if sys.version_info >= (3,0):
input("Press ENTER when ready (%d): " % (mytest.delay))
else:
raw_input("Press ENTER when ready (%d): " % (mytest.delay))
if mytest.delay > 0:
print("Delaying for %ds" % mytest.delay)
time.sleep(mytest.delay)
try:
curl.perform() # Run the actual call
except Exception as e:
# Curl exception occurred (network error), do not pass go, do not
# collect $200
trace = traceback.format_exc()
result.failures.append(Failure(message="Curl Exception: {0}".format(
e), details=trace, failure_type=validators.FAILURE_CURL_EXCEPTION))
result.passed = False
curl.close()
return result
# Retrieve values
result.body = body.getvalue()
body.close()
result.response_headers = text_type(headers.getvalue(), HEADER_ENCODING) # Per RFC 2616
headers.close()
response_code = curl.getinfo(pycurl.RESPONSE_CODE)
result.response_code = response_code
logger.debug("Initial Test Result, based on expected response code: " +
str(response_code in mytest.expected_status))
if response_code in mytest.expected_status:
result.passed = True
else:
# Invalid response code
result.passed = False
failure_message = "Invalid HTTP response code: response code {0} not in expected codes [{1}]".format(
response_code, mytest.expected_status)
result.failures.append(Failure(
message=failure_message, details=None, failure_type=validators.FAILURE_INVALID_RESPONSE))
# Parse HTTP headers
try:
result.response_headers = parse_headers(result.response_headers)
except Exception as e:
trace = traceback.format_exc()
result.failures.append(Failure(message="Header parsing exception: {0}".format(
e), details=trace, failure_type=validators.FAILURE_TEST_EXCEPTION))
result.passed = False
curl.close()
return result
# print str(test_config.print_bodies) + ',' + str(not result.passed) + ' ,
# ' + str(test_config.print_bodies or not result.passed)
head = result.response_headers
# execute validator on body
if result.passed is True:
body = result.body
if mytest.validators is not None and isinstance(mytest.validators, list):
logger.debug("executing this many validators: " +
str(len(mytest.validators)))
failures = result.failures
for validator in mytest.validators:
validate_result = validator.validate(
body=body, headers=head, context=my_context)
if not validate_result:
result.passed = False
# Proxy for checking if it is a Failure object, because of
# import issues with isinstance there
if hasattr(validate_result, 'details'):
failures.append(validate_result)
# TODO add printing of validation for interactive mode
else:
logger.debug("no validators found")
# Only do context updates if test was successful
mytest.update_context_after(result.body, head, my_context)
# Print response body if override is set to print all *OR* if test failed
# (to capture maybe a stack trace)
if test_config.print_bodies or not result.passed:
if test_config.interactive:
print("RESPONSE:")
print(result.body.decode(ESCAPE_DECODING))
if test_config.print_headers or not result.passed:
if test_config.interactive:
print("RESPONSE HEADERS:")
print(result.response_headers)
# TODO add string escape on body output
logger.debug(result)
return result
def run_benchmark(benchmark, test_config=TestConfig(), context=None, *args, **kwargs):
""" Perform a benchmark, (re)using a given, configured CURL call to do so
The actual analysis of metrics is performed separately, to allow for testing
"""
# Context handling
my_context = context
if my_context is None:
my_context = Context()
warmup_runs = benchmark.warmup_runs
benchmark_runs = benchmark.benchmark_runs
message = '' # Message is name of benchmark... print it?
if (benchmark_runs <= 0):
raise Exception(
"Invalid number of benchmark runs, must be > 0 :" + benchmark_runs)
result = TestResponse()
# TODO create and use a curl-returning configuration function
# TODO create and use a post-benchmark cleanup function
# They should use is_dynamic/is_context_modifier to determine if they need to
# worry about context and re-reading/retemplating and only do it if needed
# - Also, they will need to be smart enough to handle extraction functions
# For performance reasons, we don't want to re-run templating/extraction if
# we do not need to, and do not want to save request bodies.
# Initialize variables to store output
output = BenchmarkResult()
output.name = benchmark.name
output.group = benchmark.group
metricnames = list(benchmark.metrics)
# Metric variable for curl, to avoid hash lookup for every metric name
metricvalues = [METRICS[name] for name in metricnames]
# Initialize arrays to store results for each metric
results = [list() for x in xrange(0, len(metricnames))]
curl = pycurl.Curl()
# Benchmark warm-up to allow for caching, JIT compiling, on client
logger.info('Warmup: ' + message + ' started')
for x in xrange(0, warmup_runs):
benchmark.update_context_before(my_context)
templated = benchmark.realize(my_context)
curl = templated.configure_curl(
timeout=test_config.timeout, context=my_context, curl_handle=curl)
# Do not store actual response body at all.
curl.setopt(pycurl.WRITEFUNCTION, lambda x: None)
curl.perform()
logger.info('Warmup: ' + message + ' finished')
logger.info('Benchmark: ' + message + ' starting')
for x in xrange(0, benchmark_runs): # Run the actual benchmarks
# Setup benchmark
benchmark.update_context_before(my_context)
templated = benchmark.realize(my_context)
curl = templated.configure_curl(
timeout=test_config.timeout, context=my_context, curl_handle=curl)
# Do not store actual response body at all.
curl.setopt(pycurl.WRITEFUNCTION, lambda x: None)
try: # Run the curl call, if it errors, then add to failure counts for benchmark
curl.perform()
except Exception:
output.failures = output.failures + 1
curl.close()
curl = pycurl.Curl()
continue # Skip metrics collection
# Get all metrics values for this run, and store to metric lists
for i in xrange(0, len(metricnames)):
results[i].append(curl.getinfo(metricvalues[i]))
logger.info('Benchmark: ' + message + ' ending')
temp_results = dict()
for i in xrange(0, len(metricnames)):
temp_results[metricnames[i]] = results[i]
output.results = temp_results
return analyze_benchmark_results(output, benchmark)
def analyze_benchmark_results(benchmark_result, benchmark):
""" Take a benchmark result containing raw benchmark results, and do aggregation by
applying functions
Aggregates come out in format of metricname, aggregate_name, result """
output = BenchmarkResult()
output.name = benchmark_result.name
output.group = benchmark_result.group
output.failures = benchmark_result.failures
# Copy raw metric arrays over where necessary
raw_results = benchmark_result.results
temp = dict()
for metric in benchmark.raw_metrics:
temp[metric] = raw_results[metric]
output.results = temp
# Compute aggregates for each metric, and add tuples to aggregate results
aggregate_results = list()
for metricname, aggregate_list in benchmark.aggregated_metrics.items():
numbers = raw_results[metricname]
for aggregate_name in aggregate_list:
if numbers: # Only compute aggregates if numbers exist
aggregate_function = AGGREGATES[aggregate_name]
aggregate_results.append(
(metricname, aggregate_name, aggregate_function(numbers)))
else:
aggregate_results.append((metricname, aggregate_name, None))
output.aggregates = aggregate_results
return output
def metrics_to_tuples(raw_metrics):
""" Converts metric dictionary of name:values_array into list of tuples
Use case: writing out benchmark to CSV, etc
Input:
{'metric':[value1,value2...], 'metric2':[value1,value2,...]...}
Output: list, with tuple header row, then list of tuples of values
[('metric','metric',...), (metric1_value1,metric2_value1, ...) ... ]
"""
if not isinstance(raw_metrics, dict):
raise TypeError("Input must be dictionary!")
metrics = sorted(raw_metrics.keys())
arrays = [raw_metrics[metric] for metric in metrics]
num_rows = len(arrays[0]) # Assume all same size or this fails
output = list()
output.append(tuple(metrics)) # Add headers
# Create list of tuples mimicking 2D array from input
for row in xrange(0, num_rows):
new_row = tuple([arrays[col][row] for col in xrange(0, len(arrays))])
output.append(new_row)
return output
def write_benchmark_json(file_out, benchmark_result, benchmark, test_config=TestConfig()):
""" Writes benchmark to file as json """
json.dump(benchmark_result, file_out, default=safe_to_json)
def write_benchmark_csv(file_out, benchmark_result, benchmark, test_config=TestConfig()):
""" Writes benchmark to file as csv """
writer = csv.writer(file_out)
writer.writerow(('Benchmark', benchmark_result.name))
writer.writerow(('Benchmark Group', benchmark_result.group))
writer.writerow(('Failures', benchmark_result.failures))
# Write result arrays
if benchmark_result.results:
writer.writerow(('Results', ''))
writer.writerows(metrics_to_tuples(benchmark_result.results))
if benchmark_result.aggregates:
writer.writerow(('Aggregates', ''))
writer.writerows(benchmark_result.aggregates)
# Method to call when writing benchmark file
OUTPUT_METHODS = {u'csv': write_benchmark_csv, u'json': write_benchmark_json}
def log_failure(failure, context=None, test_config=TestConfig()):
""" Log a failure from a test """
logger.error("Test Failure, failure type: {0}, Reason: {1}".format(
failure.failure_type, failure.message))
if failure.details:
logger.error("Validator/Error details:" + str(failure.details))
def run_testsets(testsets):
""" Execute a set of tests, using given TestSet list input """
group_results = dict() # results, by group
group_failure_counts = dict()
total_failures = 0
myinteractive = False
curl_handle = pycurl.Curl()
for testset in testsets:
mytests = testset.tests
myconfig = testset.config
mybenchmarks = testset.benchmarks
context = Context()
# Bind variables & add generators if pertinent
if myconfig.variable_binds:
context.bind_variables(myconfig.variable_binds)
if myconfig.generators:
for key, value in myconfig.generators.items():
context.add_generator(key, value)
# Make sure we actually have tests to execute
if not mytests and not mybenchmarks:
# no tests in this test set, probably just imports.. skip to next
# test set
            continue
myinteractive = True if myinteractive or myconfig.interactive else False
# Run tests, collecting statistics as needed
for test in mytests:
# Initialize the dictionaries to store test fail counts and results
if test.group not in group_results:
group_results[test.group] = list()
group_failure_counts[test.group] = 0
result = run_test(test, test_config=myconfig, context=context, curl_handle=curl_handle)
result.body = None # Remove the body, save some memory!
if not result.passed: # Print failure, increase failure counts for that test group
# Use result test URL to allow for templating
logger.error('Test Failed: ' + test.name + " URL=" + result.test.url +
" Group=" + test.group + " HTTP Status Code: " + str(result.response_code))
# Print test failure reasons
if result.failures:
for failure in result.failures:
log_failure(failure, context=context,
test_config=myconfig)
# Increment test failure counts for that group (adding an entry
# if not present)
failures = group_failure_counts[test.group]
failures = failures + 1
group_failure_counts[test.group] = failures
else: # Test passed, print results
logger.info('Test Succeeded: ' + test.name +
" URL=" + test.url + " Group=" + test.group)
# Add results for this test group to the resultset
group_results[test.group].append(result)
# handle stop_on_failure flag
if not result.passed and test.stop_on_failure is not None and test.stop_on_failure:
print(
'STOP ON FAILURE! stopping test set execution, continuing with other test sets')
break
for benchmark in mybenchmarks: # Run benchmarks, analyze, write
if not benchmark.metrics:
logger.debug('Skipping benchmark, no metrics to collect')
continue
logger.info("Benchmark Starting: " + benchmark.name +
" Group: " + benchmark.group)
benchmark_result = run_benchmark(
benchmark, myconfig, context=context)
print(benchmark_result)
logger.info("Benchmark Done: " + benchmark.name +
" Group: " + benchmark.group)
if benchmark.output_file: # Write file
logger.debug(
'Writing benchmark to file in format: ' + benchmark.output_format)
write_method = OUTPUT_METHODS[benchmark.output_format]
my_file = open(benchmark.output_file, 'w') # Overwrites file
logger.debug("Benchmark writing to file: " +
benchmark.output_file)
write_method(my_file, benchmark_result,
benchmark, test_config=myconfig)
my_file.close()
if myinteractive:
# a break for when interactive bits are complete, before summary data
print("===================================")
# Print summary results
for group in sorted(group_results.keys()):
test_count = len(group_results[group])
failures = group_failure_counts[group]
total_failures = total_failures + failures
passfail = {True: u'SUCCEEDED: ', False: u'FAILED: '}
output_string = "Test Group {0} {1}: {2}/{3} Tests Passed!".format(group, passfail[failures == 0], str(test_count - failures), str(test_count))
if myconfig.skip_term_colors:
print(output_string)
else:
if failures > 0:
print('\033[91m' + output_string + '\033[0m')
else:
print('\033[92m' + output_string + '\033[0m')
return total_failures
def register_extensions(modules):
""" Import the modules and register their respective extensions """
if isinstance(modules, basestring): # Catch supplying just a string arg
modules = [modules]
for ext in modules:
# Get the package prefix and final module name
segments = ext.split('.')
module = segments.pop()
package = '.'.join(segments)
# Necessary to get the root module back
module = __import__(ext, globals(), locals(), package)
# Extensions are registered by applying a register function to sets of
# registry name/function pairs inside an object
extension_applies = {
'VALIDATORS': validators.register_validator,
'COMPARATORS': validators.register_comparator,
'VALIDATOR_TESTS': validators.register_test,
'EXTRACTORS': validators.register_extractor,
'GENERATORS': generators.register_generator
}
has_registry = False
for registry_name, register_function in extension_applies.items():
if hasattr(module, registry_name):
registry = getattr(module, registry_name)
for key, val in registry.items():
register_function(key, val)
if registry:
has_registry = True
if not has_registry:
raise ImportError(
"Extension to register did not contain any registries: {0}".format(ext))
# AUTOIMPORTS, these should run just before the main method, to ensure
# everything else is loaded
try:
import jsonschema
register_extensions('pyresttest.ext.validator_jsonschema')
except ImportError as ie:
logging.debug(
"Failed to load jsonschema validator, make sure the jsonschema module is installed if you wish to use schema validators.")
try:
import jmespath
register_extensions('pyresttest.ext.extractor_jmespath')
except ImportError as ie:
logging.debug(
"Failed to load jmespath extractor, make sure the jmespath module is installed if you wish to use jmespath extractor.")
def main(args):
"""
Execute a test against the given base url.
Keys allowed for args:
url - REQUIRED - Base URL
test - REQUIRED - Test file (yaml)
print_bodies - OPTIONAL - print response body
print_headers - OPTIONAL - print response headers
log - OPTIONAL - set logging level {debug,info,warning,error,critical} (default=warning)
interactive - OPTIONAL - mode that prints info before and after test execution and pauses for user input for each test
absolute_urls - OPTIONAL - mode that treats URLs in tests as absolute/full URLs instead of relative URLs
skip_term_colors - OPTIONAL - mode that turns off terminal colors in the output
"""
if 'log' in args and args['log'] is not None:
logger.setLevel(LOGGING_LEVELS.get(
args['log'].lower(), logging.NOTSET))
if 'import_extensions' in args and args['import_extensions']:
extensions = args['import_extensions'].split(';')
# We need to add current folder to working path to import modules
working_folder = args['cwd']
if working_folder not in sys.path:
sys.path.insert(0, working_folder)
register_extensions(extensions)
test_file = args['test']
test_structure = read_test_file(test_file)
my_vars = None
if 'vars' in args and args['vars'] is not None:
my_vars = yaml.safe_load(args['vars'])
if my_vars and not isinstance(my_vars, dict):
raise Exception("Variables must be a dictionary!")
# Set up base URL
base_url = args['url']
if 'absolute_urls' in args and args['absolute_urls']:
base_url = ''
tests = parse_testsets(base_url, test_structure,
working_directory=os.path.dirname(test_file), vars=my_vars)
# Override configs from command line if config set
for t in tests:
if 'print_bodies' in args and args['print_bodies'] is not None and bool(args['print_bodies']):
t.config.print_bodies = safe_to_bool(args['print_bodies'])
if 'print_headers' in args and args['print_headers'] is not None and bool(args['print_headers']):
t.config.print_headers = safe_to_bool(args['print_headers'])
if 'interactive' in args and args['interactive'] is not None:
t.config.interactive = safe_to_bool(args['interactive'])
if 'verbose' in args and args['verbose'] is not None:
t.config.verbose = safe_to_bool(args['verbose'])
if 'ssl_insecure' in args and args['ssl_insecure'] is not None:
t.config.ssl_insecure = safe_to_bool(args['ssl_insecure'])
if 'skip_term_colors' in args and args['skip_term_colors'] is not None:
t.config.skip_term_colors = safe_to_bool(args['skip_term_colors'])
# Execute all testsets
failures = run_testsets(tests)
sys.exit(failures)
def parse_command_line_args(args_in):
""" Runs everything needed to execute from the command line, so main method is callable without arg parsing """
parser = OptionParser(
usage="usage: %prog base_url test_filename.yaml [options] ")
parser.add_option(u"--print-bodies", help="Print all response bodies",
action="store", type="string", dest="print_bodies")
parser.add_option(u"--print-headers", help="Print all response headers",
action="store", type="string", dest="print_headers")
parser.add_option(u"--log", help="Logging level",
action="store", type="string")
parser.add_option(u"--interactive", help="Interactive mode",
action="store", type="string")
parser.add_option(
u"--url", help="Base URL to run tests against", action="store", type="string")
parser.add_option(u"--test", help="Test file to use",
action="store", type="string")
parser.add_option(u'--import_extensions',
help='Extensions to import, separated by semicolons', action="store", type="string")
parser.add_option(
u'--vars', help='Variables to set, as a YAML dictionary', action="store", type="string")
parser.add_option(u'--verbose', help='Put cURL into verbose mode for extra debugging power',
action='store_true', default=False, dest="verbose")
parser.add_option(u'--ssl-insecure', help='Disable cURL host and peer cert verification',
action='store_true', default=False, dest="ssl_insecure")
parser.add_option(u'--absolute-urls', help='Enable absolute URLs in tests instead of relative paths',
action="store_true", dest="absolute_urls")
parser.add_option(u'--skip_term_colors', help='Turn off the output term colors',
action='store_true', default=False, dest="skip_term_colors")
(args, unparsed_args) = parser.parse_args(args_in)
args = vars(args)
# Handle url/test as named, or, failing that, positional arguments
if not args['url'] or not args['test']:
if len(unparsed_args) == 2:
args[u'url'] = unparsed_args[0]
args[u'test'] = unparsed_args[1]
elif len(unparsed_args) == 1 and args['url']:
args['test'] = unparsed_args[0]
elif len(unparsed_args) == 1 and args['test']:
args['url'] = unparsed_args[0]
else:
parser.print_help()
parser.error(
"wrong number of arguments, need both url and test filename, either as 1st and 2nd parameters or via --url and --test")
# So modules can be loaded from current folder
args['cwd'] = os.path.realpath(os.path.abspath(os.getcwd()))
return args
def command_line_run(args_in):
args = parse_command_line_args(args_in)
main(args)
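# Illustrative invocation (hypothetical URL and file names), assuming this script is run directly:
#   python thisscript.py https://api.example.com tests.yaml --log debug --interactive true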
# Allow import into another module without executing the main method
if __name__ == '__main__':
command_line_run(sys.argv[1:])
|
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.web import authenticated, HTTPError
from wtforms import Form, StringField, validators
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_pet.handlers.api_proxy import user_jobs_get_req
from qiita_db.util import send_email
from qiita_db.user import User
from qiita_db.logger import LogEntry
from qiita_db.exceptions import QiitaDBUnknownIDError, QiitaDBError
from qiita_core.exceptions import IncorrectPasswordError
from qiita_core.util import execute_as_transaction
from qiita_core.qiita_settings import qiita_config
class UserProfile(Form):
name = StringField("Name", [validators.required()])
affiliation = StringField("Affiliation")
address = StringField("Address")
phone = StringField("Phone")
class UserProfileHandler(BaseHandler):
"""Displays user profile page and handles profile updates"""
@authenticated
def get(self):
profile = UserProfile()
profile.process(data=self.current_user.info)
self.render("user_profile.html", profile=profile, msg="", passmsg="")
@authenticated
@execute_as_transaction
def post(self):
passmsg = ""
msg = ""
user = self.current_user
action = self.get_argument("action")
if action == "profile":
# tuple of columns available for the profile
# FORM INPUT NAMES MUST MATCH DB COLUMN NAMES
form_data = UserProfile()
form_data.process(data=self.request.arguments)
profile = {name: data[0].decode('ascii') for name, data in
form_data.data.items()}
# Turn list default values into plain strings
for field in form_data:
field.data = field.data[0].decode('ascii')
try:
user.info = profile
msg = "Profile updated successfully"
except Exception as e:
msg = "ERROR: profile could not be updated"
LogEntry.create('Runtime', "Cound not update profile: %s" %
str(e), info={'User': user.id})
elif action == "password":
form_data = UserProfile()
form_data.process(data=user.info)
oldpass = self.get_argument("oldpass")
newpass = self.get_argument("newpass")
try:
changed = user.change_password(oldpass, newpass)
except Exception as e:
passmsg = "ERROR: could not change password"
LogEntry.create('Runtime', "Could not change password: %s" %
str(e), info={'User': user.id})
else:
if changed:
passmsg = "Password changed successfully"
else:
passmsg = "Incorrect old password"
self.render("user_profile.html", user=user.id, profile=form_data,
msg=msg, passmsg=passmsg)
class ForgotPasswordHandler(BaseHandler):
"""Displays forgot password page and generates code for lost passwords"""
def get(self):
self.render("lost_pass.html", user=None, message="", level="")
@execute_as_transaction
def post(self):
message = ""
level = ""
page = "lost_pass.html"
user_id = None
try:
user = User(self.get_argument("email"))
except QiitaDBUnknownIDError:
message = "ERROR: Unknown user."
level = "danger"
else:
user_id = user.id
user.generate_reset_code()
info = user.info
try:
# qiita_config.base_url has no trailing slash, while qiita_config.portal_dir
# has a leading slash but no trailing slash, so concatenating them
# constructs the correct URL
url = qiita_config.base_url + qiita_config.portal_dir
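# e.g. base_url "https://qiita.example.org" and portal_dir "/portal" (hypothetical values)
# yield a reset link of the form "https://qiita.example.org/portal/auth/reset/<code>"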
send_email(user.id, "Qiita: Password Reset", "Please go to "
"the following URL to reset your password: \n"
"%s/auth/reset/%s \nYou "
"have 30 minutes from the time you requested a "
"reset to change your password. After this period, "
"you will have to request another reset." %
(url, info["pass_reset_code"]))
message = ("Check your email for the reset code.")
level = "success"
page = "index.html"
except Exception as e:
message = ("Unable to send email. Error has been registered. "
"Your password has not been reset.")
level = "danger"
LogEntry.create('Runtime', "Unable to send forgot password "
"email: %s" % str(e), info={'User': user.id})
self.render(page, user=user_id, message=message, level=level)
class ChangeForgotPasswordHandler(BaseHandler):
"""Displays change password page and handles password reset"""
def get(self, code):
self.render("change_lost_pass.html", user=None, message="",
level="", code=code)
@execute_as_transaction
def post(self, code):
message = ""
level = ""
page = "change_lost_pass.html"
user = None
try:
user = User(self.get_argument("email"))
except QiitaDBUnknownIDError:
message = "Unable to reset password"
level = "danger"
else:
newpass = self.get_argument("newpass")
try:
changed = user.change_forgot_password(code, newpass)
except IncorrectPasswordError:
message = "The new password is not valid. Try again."
changed = False
except QiitaDBError:
message = "Invalid code. Request a new one."
changed = False
if changed:
message = ("Password reset successful. Please log in to "
"continue.")
level = "success"
page = "index.html"
else:
if message != "":
message = ("Unable to reset password. Most likely your "
"email is incorrect or your reset window has "
"timed out.")
level = "danger"
self.render(page, message=message, level=level, code=code)
class UserMessagesHander(BaseHandler):
@authenticated
def get(self):
self.render("user_messages.html",
messages=self.current_user.messages())
def post(self):
action = self.get_argument("action")
messages = self.get_arguments("messages")
if len(messages) == 0:
raise HTTPError(400, reason="No messages passed")
if action == "read":
self.current_user.mark_messages(messages, read=True)
elif action == "unread":
self.current_user.mark_messages(messages, read=False)
elif action == "delete":
self.current_user.delete_messages(messages)
else:
raise HTTPError(400, reason="Unknown action: %s" % action)
self.render("user_messages.html",
messages=self.current_user.messages())
class UserJobs(BaseHandler):
@authenticated
def get(self):
response = user_jobs_get_req(self.current_user)
self.write(response)
|
|
#!/usr/bin/env python
import sys, platform
from Tkinter import *
import tkFont, tkMessageBox
from tkFileDialog import askopenfilename
### imports for connections
import socket,ssl,base64, RSAKeyHandling, os, json
from M2Crypto import DH,RSA,Rand
from binascii import hexlify
import pickle
from base64 import b64decode
#==============================================================
# define global variables
# define window parameters
WINDOW_WIDTH_MS = 500
WINDOW_HEIGHT_MS = 100
WINDOW_WIDTH_ES = 600
WINDOW_WIDTH_AUTH = 650
WINDOW_HEIGHT_AUTH = 500
WINDOW_WIDTH_MAIN = 760
WINDOW_HEIGHT_MAIN = 500
# authentication parameters
privateKeyFile = None
privateRSAKey = None
voterPIN = None
maxPINLength = 4
voterID = None
voterIDLength = 10
# possibleSelectionValues = (u"\u03B1", u"\u03B2", u"\u03B3", u"\u03B4", u"\u03B5") # alpha, beta, gamma, delta, epsilon
possibleSelectionValues = ("Alpha", "Beta", "Gamma", "Delta", "Epsilon") # alpha, beta, gamma, delta, epsilon
userVote = []
#==============================================================
#==============================================================
# define a stack to hold checkbutton and radiobutton widgets
#==============================================================
class Stack:
def __init__(self):
self.items = []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def length(self):
return len(self.items)
def hasElement(self, element):
return (element in self.items)
def getElement(self, index):
return self.items[index]
def deleteFirst(self):
_r = self.items[0]
del self.items[0]
return _r
def deleteElement(self, element):
if self.hasElement(element):
self.items.remove(element)
def showStack(self):
for i in range(0, len(self.items)):
print "Element %d: %d" % (i, self.items[i])
#==============================================================
#==============================================================
# define class MainScreen
class MainScreen(Tk):
def createWidgets(self):
# create heading
myriadHeadingFont = tkFont.Font(family="Myriad Pro",size=28)
self.noteLabel = Label(self,
text = "Press start to begin your voting session.",
font = myriadHeadingFont)
x = (WINDOW_WIDTH_MS - self.noteLabel.winfo_reqwidth())/2
self.noteLabel.place(x = (WINDOW_WIDTH_MS - self.noteLabel.winfo_reqwidth())/2, y = 10)
def startVoting():
# print "Authenticate --->"
self.destroy()
#create button to go next
self.startButton = Button(self, text = "Start", command = startVoting)
self.startButton.place(x = (WINDOW_WIDTH_MS - self.startButton.winfo_reqwidth())/2, y = 55)
def __init__(self, master=None):
Tk.__init__(self)
self.createWidgets()
#==============================================================
class EndScreen(Tk):
def createWidgets(self):
# create heading
myriadHeadingFont = tkFont.Font(family="Myriad Pro",size=28)
self.noteLabel = Label(self,
text = "Thank you for voting. Now please move on!",
font = myriadHeadingFont)
x = (WINDOW_WIDTH_ES - self.noteLabel.winfo_reqwidth())/2
self.noteLabel.place(x = (WINDOW_WIDTH_ES - self.noteLabel.winfo_reqwidth())/2, y = 10)
def endVoting():
sys.exit()
#create button to go next
self.doneButton = Button(self, text = "Done", command = endVoting)
self.doneButton.place(x = (WINDOW_WIDTH_ES - self.doneButton.winfo_reqwidth())/2, y = 55)
def __init__(self, master=None):
Tk.__init__(self)
self.createWidgets()
#==============================================================
#==============================================================
# define class AuthScreen
class AuthScreen(Tk):
def createWidgets(self):
#fonts_here
myriadHeadingFont = tkFont.Font(family = "Myriad Pro",size = 60)
myriadRequestFont = tkFont.Font(family = "Myriad Pro",size = 24)
myriadInputFont = tkFont.Font(family = "Myriad Pro", size = 16)
myriadFileFont = tkFont.Font(family = "Myriad Pro",size = 14)
#create heading
self.noteLabel = Label(self, text = "Login to Vote", font = myriadHeadingFont)
self.noteLabel.place(x = (WINDOW_WIDTH_AUTH - self.noteLabel.winfo_reqwidth())/2, y = 20)
#create label and input for VoterID
self.voterIDLabel = Label(self, text = "Enter your VoterID:", font = myriadRequestFont)
self.voterIDEntryInput = Entry(self, width = 20)
#create label and input for private key
fileStatusVar = StringVar()
fileStatusVar.set("No file selected")
self.qKeyLabel = Label(self, text = "Enter private key file", font = myriadRequestFont)
self.keyButton = Button(self, text = "Open private key file", width = 16,
command = lambda i=fileStatusVar: self.getPrivateKeyFile(i))
self.keyFileLabel = Label(self, textvariable = fileStatusVar, font = myriadFileFont)
# self.privateKeyFile = askopenfilename(initialdir = './privatekeys', filetypes=(("Certificate", "*.pem"),))
#create label and input for PIN
self.PINLabel = Label(self, text = "Enter your passcode:", font = myriadRequestFont)
limitCheck = StringVar() #limit to number of characters in the PIN field
def _on_write(*args):
s = limitCheck.get()
if len(s) > maxPINLength:
limitCheck.set(s[:maxPINLength])
limitCheck.trace_variable("w", _on_write)
self.PINInput = Entry(self, width = 20, textvariable = limitCheck)
self.PINInput.config(show = "*")
# placing
_sp = 100
_spAlign = _sp - 2
self.voterIDLabel.place(x = _spAlign, y = 120)
self.voterIDEntryInput.place(x = _sp, y = 160)
self.qKeyLabel.place(x = _spAlign, y = 220)
self.keyButton.place(x = _sp, y = 260)
self.keyFileLabel.place(x = _sp + self.keyButton.winfo_reqwidth() + 20, y = 260 + 3)
_y = 260 + 50
self.PINLabel.place(x = _spAlign, y = _y)
self.PINInput.place(x = _sp, y = _y + 50)
#create button to toggle visibility
self.showPIN = Button(self, text = "Toggle PIN", command = self.togglePINVisibility)
self.showPIN.place(x = _sp + self.PINInput.winfo_reqwidth() + 10, y = _y + 50)
#create button for login
self.loginButton = Button(self, text = "Login", width = 5, command = self.login)
self.loginButton.place(x = _sp, y = _y + 90)
#create button to quit program
self.quitButton = Button(self, text = "Exit", width = 5, command = self.exitProgram)
self.quitButton.place(x = _sp + self.loginButton.winfo_reqwidth() + 5, y = _y + 90)
def __init__(self, master=None):
Tk.__init__(self)
self.createWidgets()
def getPrivateKeyFile(self, var):
global privateKeyFile
privateKeyFile = askopenfilename(initialdir = './privatekeys', filetypes=(("Private key", "*.pem"),))
if privateKeyFile != "":
var.set("Private key file selected")
else:
var.set("Click button to select file")
def togglePINVisibility(self):
if self.PINInput['show'] == "":
self.PINInput.config(show = "*") # hide PIN
else:
self.PINInput.config(show = "") # show PIN
def check(self):
# check for RSA key, voterID, PIN value here
if len(self.voterIDEntryInput.get()) != voterIDLength:
tkMessageBox.showwarning(title = "Login",
message = "Please enter a valid voterID.\nVoterID must be %d characters long." % (voterIDLength))
return False
if privateKeyFile == "" or privateKeyFile == None:
tkMessageBox.showwarning(title = "Login",
message = "Please select a valid private key file")
return False
if len(self.PINInput.get()) != maxPINLength:
tkMessageBox.showwarning(title = "Login",
message = "Length of Passcode is not %d characters." % (maxPINLength))
return False
return True # all checks passed
def login(self):
if (self.check() == False): # check if values are appropriate
return
global voterID, privateRSAKey, voterPIN
voterID = self.voterIDEntryInput.get()
privateRSAKey = privateKeyFile
voterPIN = self.PINInput.get()
self.destroy()
def exitProgram(self):
sys.exit()
#==============================================================
#==============================================================
# define class ChoiceScreen
class Group(Tk):
'''This is the docline'''
voteFor = "" # voteFor = "president"|"congress"|"counsel"
MAX_SELECTIONS = 0
MAX_OPTIONS = 0
selection = ()
def createWidgets(self, _vf, _ms, _mo):
# get constructor variables
voteFor = _vf
MAX_SELECTIONS = _ms
MAX_OPTIONS = _mo
myriadHeadingFont = None
atypeOptionFont = None
optionSelectedFont = None
if platform.system() == 'Darwin':
myriadHeadingFont = tkFont.Font(family = "Myriad Pro",size = 60)
atypeOptionFont = tkFont.Font(family = "Calibri", size = 30)
optionSelectedFont = tkFont.Font(family = "Calibri", size = 30, slant = "italic", weight = "bold")
elif platform.system() == 'Windows':
myriadHeadingFont = tkFont.Font(family = "Helvetica",size = 60)
atypeOptionFont = tkFont.Font(family = "Helvetica", size = 20)
optionSelectedFont = tkFont.Font(family = "Helvetica", size = 20, slant = "italic", weight = "bold")
elif platform.system() == 'Linux':
myriadHeadingFont = tkFont.Font(family = "Helvetica",size = 60)
atypeOptionFont = tkFont.Font(family = "Helvetica", size = 20)
optionSelectedFont = tkFont.Font(family = "Helvetica", size = 20, slant = "italic", weight = "bold")
# myriadHeadingFont = tkFont.Font(family = "Myriad Pro",size = 60)
# atypeOptionFont = tkFont.Font(family = "Calibri", size = 175)
# optionSelectedFont = tkFont.Font(family = "Calibri", size = 175, slant = "italic", weight = "bold")
# create heading
self.noteLabel = Label(self,
text = "Select Your %s" % (voteFor),
font = myriadHeadingFont)
x = (WINDOW_WIDTH_MAIN - self.noteLabel.winfo_reqwidth())/2
self.noteLabel.place(x = (WINDOW_WIDTH_MAIN - self.noteLabel.winfo_reqwidth())/2, y = 20)
# create presidents ------------- OLD ALGORITHM | #sigmatag = 0
# self.presidents = [0]*maxPresidents
# presidentLabelTexts = (u"\u03B1", u"\u03B2", u"\u03B3") #alpha, beta, gamma --- greek small
# for i in range(0, len(presidentLabelTexts)):
# self.presidents[i] = Label(self, text = presidentLabelTexts[i], font = at175)
# _x = WINDOW_WIDTH_MAIN/4*(i+1) - self.presidents[i].winfo_reqwidth()/2
# self.presidents[i].place(x = _x, y=125)
# if i == 2:
# self.presidents[i].place(x = _x, y=108)
# setup radio/checkbutton list and stack
self.options = [0]*MAX_OPTIONS
valueLabels = possibleSelectionValues[0:MAX_OPTIONS]
s = Stack()
# create variables for checkbuttons and radiobuttons
varList = [0]*MAX_OPTIONS
for i in range(0, len(varList)):
varList[i] = IntVar()
radioVar = IntVar()
# Algorithm selectionRadio: #sigmatag = 1
# value = which radiobutton
# if element in stack:
# return
#
# Radiobutton[value].font = boldized_font
# stack.push(value)
# if length(stack) > 1:
# Radiobutton[0].font = original_font
def selectionRadio():
index = radioVar.get() - 1
if s.hasElement(index):
return
self.options[index]['font'] = optionSelectedFont
s.push(index)
if s.length() > 1:
self.options[s.deleteFirst()]['font'] = atypeOptionFont
# Algorithm selectionCheck: #sigmatag = 1
# value = checked or unchecked
# if value is checked:
# Checkbutton[index].font = boldized_font
# stack.push(index)
# else:
# Checkbutton[index].font = original_font
# stack.delete(index) //delete by value
#
# if length(stack) > MAX_SELECTIONS:
# stack.delete(0) //delete by index
# Checkbutton[index].font = original_font
# Checkbutton[index].deselect()
def selectionCheck(index):
value = varList[index].get()
if value == 1:
self.options[index]['font'] = optionSelectedFont
s.push(index)
else:
self.options[index]['font'] = atypeOptionFont
s.deleteElement(index)
if s.length() > MAX_SELECTIONS:
_first = s.deleteFirst()
self.options[_first]['font'] = atypeOptionFont
self.options[_first].deselect()
def underVote():
value = tkMessageBox.askquestion(title = "What?",
message = "You haven't voted properly. Do you want to move to the next section?")
if value == "yes":
return True
else:
return False
def confirmSelection():
global userVote
# if s.length != 0:
# tkMessageBox.showwarning(title = "Incomplete Vote",
# message = "You have not voted "
# if s.length != MAX_SELECTIONS:
# tkMessageBox.showwarning(title = "Incomplete Vote",
# message = "You've chosen only" % (voterIDLength))
underVoteOK = "OK"
if s.length() < MAX_SELECTIONS:
underVoteOK = underVote()
if underVoteOK == False:
return
for index in range(0, s.length()):
userVote.append(s.getElement(index))
self.destroy()
return
def skipSection():
value = tkMessageBox.askquestion(title = "What?",
message = "Do you really want to skip?")
if value == 'yes':
self.destroy()
# create options list for display in GUI
for index in range(0, MAX_OPTIONS):
if MAX_SELECTIONS > 1:
self.options[index] = Checkbutton(self, text = valueLabels[index], anchor = W, font = atypeOptionFont,
variable = varList[index], command = lambda i=index: selectionCheck(i))
else:
self.options[index] = Radiobutton(self, text = valueLabels[index], anchor = W, font = atypeOptionFont,
variable = radioVar, value = index+1, command = selectionRadio)
_x = WINDOW_WIDTH_MAIN/(MAX_OPTIONS+1)*(index+1) - self.options[index].winfo_reqwidth()/2
self.options[index].place(x = _x, y=150)
# add skip button
self.skipButton = Button(self, text = "Skip", width = "7", command = skipSection)
self.skipButton.place(x = WINDOW_WIDTH_MAIN/2 - self.skipButton.winfo_reqwidth(), y = WINDOW_HEIGHT_MAIN - 60)
# add confirm button
self.confirmButton = Button(self, text = "Confirm", width = "7", command = confirmSelection)
self.confirmButton.place(x = WINDOW_WIDTH_MAIN/2, y = WINDOW_HEIGHT_MAIN - 60)
#create button to quit program
self.quitButton = Button(self, text = "Exit", width = 5, command = self.exitProgram)
self.quitButton.place(x = WINDOW_WIDTH_MAIN - 10 - self.quitButton.winfo_reqwidth(),
y = WINDOW_HEIGHT_MAIN - 10 - self.quitButton.winfo_reqheight())
def __init__(self, _vf, _ms, _mo, master=None):
Tk.__init__(self)
self.createWidgets(_vf, _ms, _mo)
def exitProgram(self):
sys.exit()
#==============================================================
#==============================================================
def centerWindow(window, _width, _height):
# get screen attributes
screenWidth = window.winfo_screenwidth()
screenHeight = window.winfo_screenheight()
#get coordinates
x = screenWidth/2 - _width/2
y = screenHeight/2 - _height/2
window.geometry('%dx%d+%d+%d' % (_width, _height, x, y))
window.resizable(0,0)
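# e.g. centerWindow(mainScreen, WINDOW_WIDTH_MS, WINDOW_HEIGHT_MS) centres the 500x100
# main screen on the display (see the calls in main() below)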
def main():
# seeding the PRNG with 1024 random bytes from OS
# from M2Crypto
Rand.rand_seed (os.urandom (1024))
while 1:
#======================================================
# draw MAIN SCREEN
mainScreen = MainScreen()
centerWindow(mainScreen, WINDOW_WIDTH_MS, WINDOW_HEIGHT_MS)
mainScreen.mainloop()
#======================================================
### begin connecting to the server
# buffer length
buffer_length = 5000
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# require a certificate from the server
myhost = 'localhost'
myport = 4321
try:
# ssl.CERT_NONE : cause we are using a self signed certificate
ssl_sock = ssl.wrap_socket(s,cert_reqs=ssl.CERT_NONE,ssl_version=ssl.PROTOCOL_TLSv1)
ssl_sock.connect((myhost, myport))
#print repr(ssl_sock.getpeername())
#print ssl_sock.cipher()
#begin to receive DH key exchange data from server
#in order of p,g,g^a
serverDH_p = base64.b64decode(ssl_sock.read(buffer_length))
serverDH_g = base64.b64decode(ssl_sock.read(buffer_length))
serverDH_pub = base64.b64decode(ssl_sock.read(buffer_length))
myDHobject = DH.set_params(serverDH_p, serverDH_g)
# pick random p and generate g^b in myDhobject.pub
myDHobject.gen_key()
ssl_sock.sendall(base64.b64encode(myDHobject.pub))
# generate shared AES Key
sharedAESkey = myDHobject.compute_key(serverDH_pub)
# print 'shared AES Key ', hexlify(sharedAESkey)
# now we have a secure shared 256-bit AES key to send data around
# the key came from Diffie-Hellman, so even if TLS were broken, the shared key should still be secret
except:
#ZZZ change to msgbox
tkMessageBox.showwarning(title = "Connection Error",
message = "Cannot connect to server.")
ssl_sock.close()
# mainScreen.destroy()
# print 'Cannot connect to server', myhost , ':' , myport
continue
#======================================================
# draw AUTHENTICATION SCREEN
authScreen = AuthScreen()
centerWindow(authScreen, WINDOW_WIDTH_AUTH, WINDOW_HEIGHT_AUTH)
authScreen.mainloop()
# voterID, privateRSAKey and PIN are valid
#======================================================
# start validating login
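# login protocol, as implemented below: the server picks an IV; the client sends
# AES-256(sharedAESkey, IV, SHA-256(voterID || PIN)); the server answers, over the same
# AES channel, with either a rejection marker or that hash encrypted with the voter's
# RSA public key, which the client decrypts using the selected private key file and
# compares against its own hash to finish authentication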
# get the chosen IV in base64
chosen_IV_inbase64 = ssl_sock.read(buffer_length)
# decode it from base64
chosen_IV = b64decode(chosen_IV_inbase64)
# print 'got chosen_IV ', hexlify(chosen_IV)
# voterID || PIN
voterID_PIN = voterID + voterPIN
# print 'voterID_PIN ', str(voterID_PIN)
# calculate sha256 hash of voterID || PIN in base64
hash_of_voterID_PIN_inbase64 = RSAKeyHandling.sha256hash_base64(voterID_PIN)
# print 'hash of voterID_PIN in base 64 ', hash_of_voterID_PIN_inbase64
# encrypt it using AES 256
# key = sharedAESKey
# IV = chosen_IV
encrypted_AES_hash = RSAKeyHandling.AES_encryptor(sharedAESkey, hash_of_voterID_PIN_inbase64, chosen_IV)
# convert it into base64
encrypted_AES_hash_inbase64 = base64.b64encode(encrypted_AES_hash)
# send it to the server
ssl_sock.sendall(encrypted_AES_hash_inbase64)
# print 'sent to server encrypted_AES_hash_inbase64 ', encrypted_AES_hash_inbase64
# wait for server to return user_exists or user_has_voted
user_exists_base64 = ssl_sock.read(buffer_length)
# decode it from base64
user_exists = base64.b64decode(user_exists_base64)
# print hexlify(user_exists)
# decrypt it from AES using sharedAESkey and chosenIV
user_exists = RSAKeyHandling.AES_decryptor(sharedAESkey, user_exists, chosen_IV)
# print user_exists
if user_exists == 'LOL_NO_WAY':
# ZZZ change to msgbox
tkMessageBox.showerror(title = "Not Eligible User",
message = "Sorry, User Not Eligible to Vote")
#print 'Sorry, user not eligible to vote'
ssl_sock.close()
continue
## ZZZ restart GUI , how ?
# if user is eligible to vote
# load privatekey
rsakey = RSA.load_key(privateRSAKey, RSAKeyHandling.empty_callback)
try:
# user_exists must contain the hash_normal encrypted with public key
# decrypt it
decrypted_hash = rsakey.private_decrypt(user_exists, RSA.pkcs1_padding)
except:
# decryption didn't work
# ZZZ change to msgbox
tkMessageBox.showerror(title = "Decyption Error",
message = "Sorry, Wrong User Credentials")
ssl_sock.close()
continue
## ZZZ restart GUI , how ?
if decrypted_hash != hash_of_voterID_PIN_inbase64:
# ZZZ change to msgbox
tkMessageBox.showerror(title = "Decryption Error",
message = "Sorry, Wrong User Credentials")
# print 'Sorry, wrong user credentials'
ssl_sock.close()
continue
# sys.exit()
# now the user is authenticated and we can go on
# start voting
#======================================================
#draw choice screen for president/congress/counsel/
polls = {
"president" : (1, 3),
"congress" : (1, 5),
"counsel" : (2, 4)
}
votes = {
"president" : None,
"congress" : None,
"counsel" : None
}
for poll in polls:
window = Group(poll, polls[poll][0], polls[poll][1]) # def __init__(self, _vf, _ms, _mo, master=None):
centerWindow(window, WINDOW_WIDTH_MAIN, WINDOW_HEIGHT_MAIN)
window.mainloop()
votes[poll] = tuple(userVote) # store user vote
del userVote[:] # clear user vote
# send the votes to server
# print votes
votes_string = json.dumps(votes)
# convert votes to base64
votes_string_inbase64 = base64.b64encode(votes_string)
# to load it later
# votes_n = json.loads(vote_str)
# begin to encrypt votes
encrypted_votes_string = RSAKeyHandling.AES_encryptor(sharedAESkey, votes_string_inbase64, chosen_IV)
# convert it to base64
encrypted_votes_string_inbase64 = base64.b64encode(encrypted_votes_string)
# send it to the server
ssl_sock.sendall(encrypted_votes_string_inbase64)
# wait for the thank you note
encrypted_thankyou_inbase64 = ssl_sock.read(buffer_length)
# decode it from base64
encrypted_thankyou = base64.b64decode(encrypted_thankyou_inbase64)
# decrypt it using AES
decrypted_thankyou = RSAKeyHandling.AES_decryptor(sharedAESkey, encrypted_thankyou, chosen_IV)
print decrypted_thankyou
# draw END SCREEN
endScreen = EndScreen()
centerWindow(endScreen, WINDOW_WIDTH_ES, WINDOW_HEIGHT_MS)
endScreen.mainloop()
# note that closing the SSLSocket will also close the underlying socket
ssl_sock.close()
# print 'socket closed'
# end of while
# end
if __name__ == "__main__":
main()
|
|
import re
import os
import json
from six.moves.urllib.parse import quote, unquote, urlencode
from bottle import Bottle, request, HTTPError, response, HTTPResponse, redirect
import requests
from warcio.timeutils import timestamp_to_iso_date
from pywb.utils.loaders import load_yaml_config
from pywb.rewrite.wburl import WbUrl
from pywb.rewrite.cookies import CookieTracker
from pywb.apps.rewriterapp import RewriterApp
from pywb.utils.wbexception import WbException
from webrecorder.basecontroller import BaseController, wr_api_spec
from webrecorder.load.wamloader import WAMLoader
from webrecorder.utils import get_bool
from webrecorder.models.dynstats import DynStats
from webrecorder.models.stats import Stats
# ============================================================================
class ContentController(BaseController, RewriterApp):
DEF_REC_NAME = 'Recording Session'
WB_URL_RX = re.compile(r'(([\d*]*)([a-z]+_|[$][a-z0-9:.-]+)?/)?([a-zA-Z]+:)?//.*')
MODIFY_MODES = ('record', 'patch', 'extract')
BUNDLE_PREFIX = '/static/bundle/'
def __init__(self, *args, **kwargs):
BaseController.__init__(self, *args, **kwargs)
config = kwargs['config']
self.content_error_redirect = os.environ.get('CONTENT_ERROR_REDIRECT')
config['csp-header'] = self.get_csp_header()
self.browser_mgr = kwargs['browser_mgr']
self.solr_mgr = kwargs.get('solr_mgr')
RewriterApp.__init__(self,
framed_replay=True,
jinja_env=kwargs['jinja_env'],
config=config)
self.paths = config['url_templates']
self.cookie_tracker = CookieTracker(self.redis)
self.record_host = os.environ['RECORD_HOST']
self.live_host = os.environ['WARCSERVER_HOST']
self.replay_host = os.environ.get('WARCSERVER_PROXY_HOST')
if not self.replay_host:
self.replay_host = self.live_host
self.session_redirect_host = os.environ.get('SESSION_REDIRECT_HOST')
self.session_share_origin = os.environ.get('SESSION_SHARE_ORIGIN', '')
self.wam_loader = WAMLoader()
self._init_client_archive_info()
self.dyn_stats = DynStats(self.redis, config)
def _init_client_archive_info(self):
self.client_archives = {}
for pk, archive in self.wam_loader.replay_info.items():
info = {'name': archive['name'],
'about': archive['about'],
'prefix': archive['replay_prefix'],
}
if archive.get('parse_collection'):
info['parse_collection'] = True
self.client_archives[pk] = info
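# resulting shape (illustrative):
#   self.client_archives = {'<archive-key>': {'name': ..., 'about': ...,
#                                              'prefix': ..., 'parse_collection': True}}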
def get_csp_header(self):
csp = "default-src 'unsafe-eval' 'unsafe-inline' 'self' data: blob: mediastream: ws: wss: "
if self.app_host and self.content_host != self.app_host:
csp += self.app_host + '/_set_session'
if self.content_error_redirect:
csp += ' ' + self.content_error_redirect
csp += "; form-action 'self'"
return csp
def init_routes(self):
wr_api_spec.set_curr_tag('External Archives')
@self.app.get('/api/v1/client_archives')
def get_client_archives():
return self.client_archives
wr_api_spec.set_curr_tag('Browsers')
@self.app.get('/api/v1/create_remote_browser')
def create_browser():
""" Api to launch remote browser instances
"""
sesh = self.get_session()
if sesh.is_new() and self.is_content_request():
self._raise_error(403, 'invalid_browser_request')
browser_id = request.query['browser']
Stats(self.redis).incr_browser(browser_id)
user = self.get_user(redir_check=False)
data = request.query
coll_name = data.getunicode('coll', '')
rec = data.get('rec', '')
mode = data.get('mode', '')
url = data.getunicode('url', '')
timestamp = data.get('timestamp', '')
sources = ''
inv_sources = ''
patch_rec = ''
collection = user.get_collection_by_name(coll_name)
if not collection:
self._raise_error(404, 'no_such_collection')
recording = collection.get_recording(rec)
if mode == 'extract':
# Extract from All, Patch from None
sources = '*'
inv_sources = '*'
elif mode.startswith('extract:'):
# Extract from One, Patch from all but one
sources = mode.split(':', 1)[1]
inv_sources = sources
# load patch recording also
#patch_recording = collection.get_recording(recording['patch_rec'])
if recording:
patch_rec = recording.get_prop('patch_rec')
mode = 'extract'
elif mode.startswith('extract_only:'):
# Extract from one only, no patching
sources = mode.split(':', 1)[1]
inv_sources = '*'
mode = 'extract'
if mode in self.MODIFY_MODES:
if not recording:
return self._raise_error(404, 'no_such_recording')
#rec = recording.my_id
elif mode in ('replay', 'replay-coll'):
rec = '*'
else:
return self._raise_error(400, 'invalid_mode')
browser_can_write = '1' if self.access.can_write_coll(collection) else '0'
remote_ip = self._get_remote_ip()
# build kwargs
kwargs = dict(user=user.name,
id=sesh.get_id(),
coll=collection.my_id,
rec=rec,
coll_name=quote(coll_name),
#rec_name=quote(rec_name, safe='/*'),
type=mode,
sources=sources,
inv_sources=inv_sources,
patch_rec=patch_rec,
remote_ip=remote_ip,
ip=remote_ip,
browser=browser_id,
url=url,
timestamp=timestamp,
browser_can_write=browser_can_write)
data = self.browser_mgr.request_new_browser(kwargs)
if 'error_message' in data:
self._raise_error(400, data['error_message'])
return data
# UPDATE REMOTE BROWSER CONFIG
@self.app.get('/api/v1/update_remote_browser/<reqid>')
def update_remote_browser(reqid):
user, collection = self.load_user_coll(api=True)
timestamp = request.query.getunicode('timestamp')
type_ = request.query.getunicode('type')
# if switching mode, need to have write access
# for timestamp, only read access
if type_:
self.access.assert_can_write_coll(collection)
else:
self.access.assert_can_read_coll(collection)
return self.browser_mgr.update_remote_browser(reqid,
type_=type_,
timestamp=timestamp)
# REDIRECTS
@self.app.route('/record/<wb_url:path>', method='ANY')
def redir_new_temp_rec(wb_url):
coll_name = 'temp'
rec_title = self.DEF_REC_NAME
wb_url = self.add_query(wb_url)
return self.do_create_new_and_redir(coll_name, rec_title, wb_url, 'record')
@self.app.route('/$record/<coll_name>/<rec_title>/<wb_url:path>', method='ANY')
def redir_new_record(coll_name, rec_title, wb_url):
wb_url = self.add_query(wb_url)
return self.do_create_new_and_redir(coll_name, rec_title, wb_url, 'record')
# API NEW
wr_api_spec.set_curr_tag('Recordings')
@self.app.post('/api/v1/new')
def api_create_new():
self.redir_host()
url = request.json.get('url')
coll = request.json.get('coll')
mode = request.json.get('mode')
desc = request.json.get('desc', '')
browser = request.json.get('browser')
is_content = request.json.get('is_content') and not browser
timestamp = request.json.get('timestamp')
wb_url = self.construct_wburl(url, timestamp, browser, is_content)
host = self.content_host if is_content else self.app_host
if not host:
host = request.urlparts.netloc
full_url = request.environ['wsgi.url_scheme'] + '://' + host
url, rec, patch_rec = self.do_create_new(coll, '', wb_url, mode, desc=desc)
full_url += url
return {'url': full_url,
'user': self.access.session_user.name,
'rec_name': rec,
'patch_rec_name': patch_rec
}
# COOKIES
wr_api_spec.set_curr_tag('Cookies')
@self.app.post('/api/v1/auth/cookie')
def add_cookie():
user, collection = self.load_user_coll()
data = request.json or {}
rec_name = data.get('rec', '*')
recording = collection.get_recording(rec_name)
name = data.get('name')
value = data.get('value')
domain = data.get('domain')
if not domain:
return self._raise_error(400, 'domain_missing')
self.add_cookie(user, collection, recording, name, value, domain)
return {'success': domain}
# PROXY
@self.app.route('/_proxy/<url:path>', method='ANY')
def do_proxy(url):
return self.do_proxy(url)
# PROXY with CORS
@self.app.route('/proxy-fetch/<url:path>', method='GET')
def do_proxy_fetch_cors(url):
res = self.do_proxy(url)
if 'HTTP_ORIGIN' in request.environ:
self.set_options_headers(None, None, res)
return res
wr_api_spec.set_curr_tag('Add External Records')
@self.app.route('/api/v1/remote/put-record', method='PUT')
def do_put_record():
return self.do_put_record()
# LIVE DEBUG
#@self.app.route('/live/<wb_url:path>', method='ANY')
def live(wb_url):
request.path_shift(1)
return self.handle_routing(wb_url, user='$live', coll='temp', rec='', type='live')
# EMBED
@self.app.route('/_embed/<user>/<coll>/<wb_url:path>', method='ANY')
def embed_replay(user, coll, wb_url):
request.path_shift(3)
#return self.do_replay_coll_or_rec(user, coll, wb_url, is_embed=True)
return self.handle_routing(wb_url, user, coll, '*', type='replay-coll',
is_embed=True)
# DISPLAY
@self.app.route('/_embed_noborder/<user>/<coll>/<wb_url:path>', method='ANY')
def embed_replay_noborder(user, coll, wb_url):
request.path_shift(3)
#return self.do_replay_coll_or_rec(user, coll, wb_url, is_embed=True,
# is_display=True)
return self.handle_routing(wb_url, user, coll, '*', type='replay-coll',
is_embed=True, is_display=True)
# CONTENT ROUTES
# Record
@self.app.route('/<user>/<coll>/<rec>/record/<wb_url:path>', method='ANY')
def do_record(user, coll, rec, wb_url):
request.path_shift(4)
return self.handle_routing(wb_url, user, coll, rec, type='record', redir_route='record')
# Patch
@self.app.route('/<user>/<coll>/<rec>/patch/<wb_url:path>', method='ANY')
def do_patch(user, coll, rec, wb_url):
request.path_shift(4)
return self.handle_routing(wb_url, user, coll, rec, type='patch', redir_route='patch')
# Extract
@self.app.route('/<user>/<coll>/<rec>/extract\:<archive>/<wb_url:path>', method='ANY')
def do_extract_patch_archive(user, coll, rec, wb_url, archive):
request.path_shift(4)
return self.handle_routing(wb_url, user, coll, rec, type='extract',
sources=archive,
inv_sources=archive,
redir_route='extract:' + archive)
@self.app.route('/<user>/<coll>/<rec>/extract_only\:<archive>/<wb_url:path>', method='ANY')
def do_extract_only_archive(user, coll, rec, wb_url, archive):
request.path_shift(4)
return self.handle_routing(wb_url, user, coll, rec, type='extract',
sources=archive,
inv_sources='*',
redir_route='extract_only:' + archive)
@self.app.route('/<user>/<coll>/<rec>/extract/<wb_url:path>', method='ANY')
def do_extract_all(user, coll, rec, wb_url):
request.path_shift(4)
return self.handle_routing(wb_url, user, coll, rec, type='extract',
sources='*',
inv_sources='*',
redir_route='extract')
# REPLAY
# Replay List
@self.app.route('/<user>/<coll>/list/<list_id>/<bk_id>/<wb_url:path>', method='ANY')
def do_replay_list_bookmark(user, coll, list_id, bk_id, wb_url):
request.path_shift(5)
return self.handle_routing(wb_url, user, coll, '*', type='replay-coll')
# Replay Recording
@self.app.route('/<user>/<coll>/<rec>/replay/<wb_url:path>', method='ANY')
def do_replay_rec(user, coll, rec, wb_url):
request.path_shift(4)
return self.handle_routing(wb_url, user, coll, rec, type='replay')
# Replay Coll
@self.app.route('/<user>/<coll>/<wb_url:path>', method='ANY')
def do_replay_coll(user, coll, wb_url):
request.path_shift(2)
return self.handle_routing(wb_url, user, coll, '*', type='replay-coll')
# Session redir
@self.app.get(['/_set_session'])
def set_sesh():
sesh = self.get_session()
if self.is_content_request():
cookie = request.query.getunicode('cookie')
sesh.set_id_from_cookie(cookie)
return self.redirect(request.query.getunicode('path'))
else:
redir_url = request.query.getunicode('redir_back')
if redir_url and redir_url.startswith(self.session_share_origin):
url = redir_url
else:
url = request.environ['wsgi.url_scheme'] + '://' + self.content_host
self.set_options_headers(self.content_host, self.app_host)
response.headers['Cache-Control'] = 'no-cache'
cookie = request.query.getunicode('webrec.sesh_cookie')
# otherwise, check if content cookie provided
# already have same session, just redirect back
# likely a real 404 not found
if sesh.is_same_session(request.query.getunicode('content_cookie')):
redirect(url + request.query.getunicode('path'))
# if anon, ensure session is persisted before setting content session
# generate cookie to pass
if not cookie:
self.access.init_session_user(persist=True)
cookie = sesh.get_cookie()
cookie = quote(cookie)
if not redir_url:
url += '/_set_session'
url += '?{0}&cookie={1}'.format(request.environ['QUERY_STRING'], cookie)
redirect(url)
# OPTIONS
@self.app.route('/_set_session', method='OPTIONS')
def set_sesh_options():
self.set_options_headers(self.content_host, self.app_host)
return ''
@self.app.route('/_clear_session', method='OPTIONS')
def set_clear_options():
self.set_options_headers(self.app_host, self.content_host)
return ''
# CLEAR CONTENT SESSION
@self.app.get(['/_clear_session'])
def clear_sesh():
self.set_options_headers(self.app_host, self.content_host)
response.headers['Cache-Control'] = 'no-cache'
if not self.is_content_request():
self._raise_error(400, 'invalid_request')
try:
# delete session (will update the cookie)
self.get_session().delete()
return {'success': 'logged_out'}
except Exception as e:
self._raise_error(400, 'invalid_request')
def do_proxy(self, url):
info = self.browser_mgr.init_remote_browser_session()
if not info:
return self._raise_error(400, 'invalid_connection_source')
try:
kwargs = info
user = info['the_user']
collection = info['collection']
recording = info['recording']
if kwargs['type'] == 'replay-coll':
collection.sync_coll_index(exists=False, do_async=False)
url = self.add_query(url)
kwargs['url'] = url
wb_url = kwargs.get('timestamp', '') + 'bn_/' + url
request.environ['webrec.template_params'] = kwargs
request.environ['pywb.static_prefix'] = self.BUNDLE_PREFIX
remote_ip = info.get('remote_ip')
if remote_ip and info['type'] in self.MODIFY_MODES:
remote_ip = self.check_rate_limit(user, remote_ip)
kwargs['ip'] = remote_ip
resp = self.render_content(wb_url, kwargs, request.environ)
if self.should_force_cache(resp.status_headers):
resp.status_headers.headers.append(
('Cache-Control', 'public, max-age=54000, immutable')
)
resp = HTTPResponse(body=resp.body,
status=resp.status_headers.statusline,
headers=resp.status_headers.headers)
return resp
except Exception as e:
@self.jinja2_view('content_error.html')
def handle_error(status_code, err_body, environ):
response.status = status_code
kwargs['url'] = url
kwargs['status'] = status_code
kwargs['err_body'] = err_body
kwargs['host_prefix'] = self.get_host_prefix(environ)
kwargs['proxy_magic'] = environ.get('wsgiprox.proxy_host', '')
return kwargs
status_code = 500
if hasattr(e, 'status_code'):
status_code = e.status_code
if hasattr(e, 'body'):
err_body = e.body
elif hasattr(e, 'msg'):
err_body = e.msg
else:
err_body = ''
return handle_error(status_code, err_body, request.environ)
def should_force_cache(self, status_headers):
if not request.environ.get('HTTP_REFERER'):
return False
if not status_headers.statusline.startswith('200'):
return False
if 'no-store' in status_headers.get_header('X-Archive-Orig-Cache-Control', ''):
return False
return True
def check_remote_archive(self, wb_url, mode, wb_url_obj=None):
wb_url_obj = wb_url_obj or WbUrl(wb_url)
res = self.wam_loader.find_archive_for_url(wb_url_obj.url)
if not res:
return
pk, new_url, id_ = res
mode = 'extract:' + id_
new_url = WbUrl(new_url).to_str(mod=wb_url_obj.mod)
return mode, new_url
def do_put_record(self):
reqid = request.query.getunicode('reqid')
info = self.browser_mgr.init_remote_browser_session(reqid=reqid)
if not info:
return self._raise_error(400, 'invalid_connection_source')
user = info['the_user']
collection = info['collection']
recording = info['recording']
kwargs = dict(user=user.name,
coll=collection.my_id,
rec=recording.my_id,
type='put_record')
url = request.query.getunicode('url')
timestamp = request.query.getunicode('timestamp')
if not timestamp:
self._raise_error(400, 'timestamp_missing')
headers = {'Content-Type': request.environ.get('CONTENT_TYPE', 'text/plain')}
#if timestamp:
# headers['WARC-Date'] = timestamp_to_iso_date(timestamp)
ts_url = timestamp + '/' + url if timestamp else url
url_params = {'url': 'urn:' + request.query.get('type', 'metadata') + ':' + ts_url}
upstream_url = self.get_upstream_url('', kwargs, url_params)
data = request.body.read()
print('adding record', upstream_url)
r = requests.put(upstream_url,
data=data,
headers=headers,
)
try:
res = r.json()
if res['success'] != 'true':
print(res)
return {'error_message': 'put_record_failed'}
warc_date = res.get('WARC-Date')
except Exception as e:
print(e)
return {'error_message': 'put_record_failed'}
if self.solr_mgr and request.query.getunicode('type') == 'text':
pid = request.query.getunicode('pid')
page = collection.get_page(pid)
kwargs['pid'] = pid
kwargs['title'] = page.get('title')
kwargs['url'] = url
kwargs['timestamp'] = timestamp or page.get('timestamp')
kwargs['hasScreenshot'] = request.query.getunicode('hasScreenshot')
self.solr_mgr.ingest(data, kwargs)
# update page metadata as well
page['has_text'] = True
page['has_screenshot'] = request.query.getunicode('hasScreenshot')
collection.update_page(page)
return res
def do_create_new_and_redir(self, coll_name, rec_name, wb_url, mode):
new_url, _, _2 = self.do_create_new(coll_name, rec_name, wb_url, mode)
return self.redirect(new_url)
def do_create_new(self, coll_name, rec_title, wb_url, mode, desc=''):
if mode == 'record':
result = self.check_remote_archive(wb_url, mode)
if result:
mode, wb_url = result
user = self.access.init_session_user()
if user.is_anon():
if self.anon_disabled:
self.flash_message('Sorry, anonymous recording is not available.')
self.redirect('/')
return
coll_name = 'temp'
coll_title = 'Temporary Collection'
else:
coll_title = coll_name
coll_name = self.sanitize_title(coll_title)
collection = user.get_collection_by_name(coll_name)
if not collection:
collection = user.create_collection(coll_name, title=coll_title)
recording = self._create_new_rec(collection, rec_title, mode, desc=desc)
if mode.startswith('extract:'):
patch_recording = self._create_new_rec(collection,
self.patch_of_name(recording['title']),
'patch')
recording.set_patch_recording(patch_recording)
patch_rec_name = patch_recording.my_id
else:
patch_rec_name = ''
new_url = '/{user}/{coll}/{rec}/{mode}/{url}'.format(user=user.my_id,
coll=collection.name,
rec=recording.name,
mode=mode,
url=wb_url)
return new_url, recording.my_id, patch_rec_name
def redir_set_session(self):
full_path = request.environ['SCRIPT_NAME'] + request.environ['PATH_INFO']
full_path = self.add_query(full_path)
self.redir_host(self.session_redirect_host, '/_set_session?path=' + quote(full_path))
def _create_new_rec(self, collection, title, mode, desc=''):
#rec_name = self.sanitize_title(title) if title else ''
rec_type = 'patch' if mode == 'patch' else None
return collection.create_recording(title=title,
desc=desc,
rec_type=rec_type)
def patch_of_name(self, name):
return 'Patch of ' + name
def handle_routing(self, wb_url, user, coll_name, rec_name, type,
is_embed=False,
is_display=False,
sources='',
inv_sources='',
redir_route=None):
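# central dispatcher for the content routes: resolves user/collection/recording,
# enforces session, access and rate-limit checks for record/patch/extract modes,
# then hands the rewritten request to RewriterApp.render_content below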
wb_url = self._full_url(wb_url)
if user == '_new' and redir_route:
return self.do_create_new_and_redir(coll_name, rec_name, wb_url, redir_route)
sesh = self.get_session()
remote_ip = None
frontend_cache_header = None
patch_recording = None
the_user, collection, recording = self.user_manager.get_user_coll_rec(user, coll_name, rec_name)
if not the_user:
msg = 'not_found' if user == 'api' else 'no_such_user'
self._raise_error(404, msg)
coll = collection.my_id if collection else None
rec = recording.my_id if recording else None
if type in self.MODIFY_MODES:
if sesh.is_new() and self.is_content_request():
self.redir_set_session()
if not recording:
self._redir_if_sanitized(self.sanitize_title(rec_name),
rec_name,
wb_url)
# don't auto create recording for inner frame w/o accessing outer frame
self._raise_error(404, 'no_such_recording')
elif not recording.is_open():
# force creation of new recording as this one is closed
self._raise_error(400, 'recording_not_open')
collection.access.assert_can_write_coll(collection)
if the_user.is_out_of_space():
self._raise_error(402, 'out_of_space')
remote_ip = self._get_remote_ip()
remote_ip = self.check_rate_limit(the_user, remote_ip)
if inv_sources and inv_sources != '*':
#patch_rec_name = self.patch_of_name(rec, True)
patch_recording = recording.get_patch_recording()
#patch_recording = collection.get_recording_by_name(patch_rec_name)
if type in ('replay-coll', 'replay'):
if not collection:
self._redir_if_sanitized(self.sanitize_title(coll_name),
coll_name,
wb_url)
if sesh.is_new() and self.is_content_request():
self.redir_set_session()
else:
self._raise_error(404, 'no_such_collection')
access = self.access.check_read_access_public(collection)
if not access:
if sesh.is_new() and self.is_content_request():
self.redir_set_session()
else:
self._raise_error(404, 'no_such_collection')
if access != 'public':
frontend_cache_header = ('Cache-Control', 'private')
if type == 'replay':
if not recording:
self._raise_error(404, 'no_such_recording')
request.environ['SCRIPT_NAME'] = quote(request.environ['SCRIPT_NAME'], safe='/:')
wb_url = self._context_massage(wb_url)
wb_url_obj = WbUrl(wb_url)
is_top_frame = (wb_url_obj.mod == self.frame_mod or wb_url_obj.mod.startswith('$br:'))
if type == 'record' and is_top_frame:
result = self.check_remote_archive(wb_url, type, wb_url_obj)
if result:
mode, wb_url = result
new_url = '/{user}/{coll}/{rec}/{mode}/{url}'.format(user=user,
coll=coll_name,
rec=rec_name,
mode=mode,
url=wb_url)
return self.redirect(new_url)
elif type == 'replay-coll' and not is_top_frame:
collection.sync_coll_index(exists=False, do_async=False)
kwargs = dict(user=user,
id=sesh.get_id(),
coll=coll,
rec=rec,
coll_name=quote(coll_name),
rec_name=quote(rec_name, safe='/*'),
the_user=the_user,
collection=collection,
recording=recording,
patch_recording=patch_recording,
type=type,
sources=sources,
inv_sources=inv_sources,
patch_rec=patch_recording.my_id if patch_recording else None,
ip=remote_ip,
is_embed=is_embed,
is_display=is_display)
# top-frame replay but through a proxy, redirect to original
if is_top_frame and 'wsgiprox.proxy_host' in request.environ:
kwargs['url'] = wb_url_obj.url
kwargs['timestamp'] = wb_url_obj.timestamp
self.browser_mgr.update_local_browser(kwargs)
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
return redirect(wb_url_obj.url)
try:
self.check_if_content(wb_url_obj, request.environ, is_top_frame)
request.environ['pywb.static_prefix'] = self.BUNDLE_PREFIX
resp = self.render_content(wb_url, kwargs, request.environ)
if frontend_cache_header:
resp.status_headers.headers.append(frontend_cache_header)
resp = HTTPResponse(body=resp.body,
status=resp.status_headers.statusline,
headers=resp.status_headers.headers)
return resp
except WbException as ue:
err_context = {
'url': ue.url,
'status': ue.status_code,
'error': ue.msg.get('error'),
'timestamp': wb_url_obj.timestamp if wb_url_obj else '',
'user': user,
'coll': coll_name,
'rec': rec_name,
'type': type,
'app_host': self.app_host,
}
@self.jinja2_view('content_error.html')
def handle_error(error):
response.status = ue.status_code
return error
if self.content_error_redirect:
return redirect(self.content_error_redirect + '?' + urlencode(err_context), code=307)
else:
return handle_error(err_context)
def check_if_content(self, wb_url, environ, is_top_frame):
if not wb_url.is_replay():
return
if not self.content_host:
return
if is_top_frame:
if self.is_content_request():
self.redir_host(self.app_host)
else:
if not self.is_content_request():
self.redir_host(self.content_host)
def _filter_headers(self, type, status_headers):
if type in ('replay', 'replay-coll'):
new_headers = []
for name, value in status_headers.headers:
if name.lower() != 'set-cookie':
new_headers.append((name, value))
status_headers.headers = new_headers
def _inject_nocache_headers(self, status_headers, kwargs):
if 'browser_id' in kwargs:
status_headers.headers.append(
('Cache-Control', 'no-cache, no-store, max-age=0, must-revalidate')
)
def _redir_if_sanitized(self, id, title, wb_url):
if id != title:
target = request.script_name.replace(title, id)
target += wb_url
self.redirect(target)
def _context_massage(self, wb_url):
# reset HTTP_COOKIE to guarded request_cookie for LiveRewriter
if 'webrec.request_cookie' in request.environ:
request.environ['HTTP_COOKIE'] = request.environ['webrec.request_cookie']
try:
del request.environ['HTTP_X_PUSH_STATE_REQUEST']
except:
pass
#TODO: generalize
if wb_url.endswith('&spf=navigate') and wb_url.startswith('mp_/https://www.youtube.com'):
wb_url = wb_url.replace('&spf=navigate', '')
return wb_url
def add_query(self, url):
if request.query_string:
url += '?' + request.query_string
return url
def _full_url(self, url=''):
request_uri = request.environ.get('REQUEST_URI')
script_name = request.environ.get('SCRIPT_NAME', '') + '/'
if request_uri and script_name and request_uri.startswith(script_name):
url = request_uri[len(script_name):]
else:
if not url:
url = request.environ['SCRIPT_NAME'] + request.environ['PATH_INFO']
url = self.add_query(url)
return url
def get_cookie_key(self, kwargs):
sesh_id = self.get_session().get_id()
return self.dyn_stats.get_cookie_key(kwargs['the_user'],
kwargs['collection'],
kwargs['recording'],
sesh_id=sesh_id)
def add_cookie(self, user, collection, recording, name, value, domain):
sesh_id = self.get_session().get_id()
key = self.dyn_stats.get_cookie_key(user,
collection,
recording,
sesh_id=sesh_id)
self.cookie_tracker.add_cookie(key, domain, name, value)
def _get_remote_ip(self):
remote_ip = request.environ.get('HTTP_X_REAL_IP')
remote_ip = remote_ip or request.environ.get('REMOTE_ADDR', '')
remote_ip = remote_ip.rsplit('.', 1)[0]
return remote_ip
def check_rate_limit(self, user, remote_ip):
# check rate limit and return ip used for further limiting
# if skipping limit, return empty string to avoid incrementing
# rate counter for this request
res = user.is_rate_limited(remote_ip)
        if res is True:
self._raise_error(402, 'rate_limit_exceeded')
# if None, then no rate limit at all, return empty string
        elif res is None:
return ''
else:
return remote_ip
## RewriterApp overrides
def get_base_url(self, wb_url, kwargs):
# for proxy mode, 'upstream_url' already provided
# just use that
#base_url = kwargs.get('upstream_url')
#if base_url:
# base_url = base_url.format(**kwargs)
# return base_url
type = kwargs['type']
base_url = self.paths[type].format(record_host=self.record_host,
replay_host=self.replay_host,
live_host=self.live_host,
**kwargs)
return base_url
def process_query_cdx(self, cdx, wb_url, kwargs):
rec = kwargs.get('rec')
if not rec or rec == '*':
rec = cdx['source'].split(':', 1)[0]
cdx['rec'] = rec
def get_host_prefix(self, environ):
if self.content_host and 'wsgiprox.proxy_host' not in environ:
return environ['wsgi.url_scheme'] + '://' + self.content_host
else:
return super(ContentController, self).get_host_prefix(environ)
def get_top_url(self, full_prefix, wb_url, cdx, kwargs):
if wb_url.mod != self.frame_mod and self.content_host != self.app_host:
full_prefix = full_prefix.replace(self.content_host, self.app_host)
return super(ContentController, self).get_top_url(full_prefix, wb_url, cdx, kwargs)
def get_top_frame_params(self, wb_url, kwargs):
type = kwargs['type']
top_prefix = super(ContentController, self).get_host_prefix(request.environ)
top_prefix += self.get_rel_prefix(request.environ)
if type == 'live':
return {'curr_mode': type,
'is_embed': kwargs.get('is_embed'),
'is_display': kwargs.get('is_display'),
'top_prefix': top_prefix}
# refresh cookie expiration,
# disable until can guarantee cookie is not changed!
#self.get_session().update_expires()
info = self.get_content_inject_info(kwargs['the_user'],
kwargs['collection'],
kwargs['recording'])
return {'info': info,
'curr_mode': type,
'user': kwargs['user'],
'coll': kwargs['coll'],
'coll_name': kwargs['coll_name'],
'coll_title': info.get('coll_title', ''),
'rec': kwargs['rec'],
'rec_name': kwargs['rec_name'],
'rec_title': info.get('rec_title', ''),
'is_embed': kwargs.get('is_embed'),
'is_display': kwargs.get('is_display'),
'top_prefix': top_prefix,
'sources': kwargs.get('sources'),
'inv_sources': kwargs.get('inv_sources'),
}
def _add_custom_params(self, cdx, resp_headers, kwargs, record):
try:
self._add_stats(cdx, resp_headers, kwargs, record)
        except Exception:
import traceback
traceback.print_exc()
def _add_history_page(self, cdx, kwargs, page_title):
if kwargs.get('type') not in self.MODIFY_MODES:
return
collection = kwargs.get('collection')
recording = kwargs.get('recording')
if not collection or not recording:
return
page_data = {'url': cdx['url'],
'timestamp': cdx['timestamp'],
'title': page_title,
'browser': kwargs.get('browser', ''),
}
collection.add_page(page_data, recording)
def _add_stats(self, cdx, resp_headers, kwargs, record):
type_ = kwargs['type']
if type_ == 'replay-coll':
content_len = record.rec_headers.get_header('Content-Length')
if content_len is not None:
Stats(self.redis).incr_replay(int(content_len), kwargs['user'])
if type_ in ('record', 'live'):
return
source = cdx.get('source')
if not source:
return
if source == 'local':
source = 'replay'
if source == 'replay' and type_ == 'patch':
return
orig_source = cdx.get('orig_source_id')
if orig_source:
source = orig_source
ra_rec = None
ra_recording = None
# set source in recording-key
if type_ in self.MODIFY_MODES:
skip = resp_headers.get('Recorder-Skip')
if not skip and source not in ('live', 'replay'):
ra_rec = unquote(resp_headers.get('Recorder-Rec', ''))
ra_rec = ra_rec or kwargs['rec']
recording = kwargs.get('recording')
patch_recording = kwargs.get('patch_recording')
if recording and ra_rec == recording.my_id:
ra_recording = recording
elif patch_recording and ra_rec == patch_recording.my_id:
ra_recording = patch_recording
url = cdx.get('url')
referrer = request.environ.get('HTTP_REFERER')
if not referrer:
referrer = url
elif ('wsgiprox.proxy_host' not in request.environ and
request.environ.get('HTTP_HOST') in referrer):
referrer = url
self.dyn_stats.update_dyn_stats(url, kwargs, referrer, source, ra_recording)
def handle_custom_response(self, environ, wb_url, full_prefix, host_prefix, kwargs):
# don't attempt to check if url is valid by accessing content
kwargs['no_timegate_check'] = True
# test if request specifies a containerized browser
if wb_url.mod.startswith('$br:'):
return self.handle_browser_embed(wb_url, kwargs)
return RewriterApp.handle_custom_response(self, environ, wb_url, full_prefix, host_prefix, kwargs)
def handle_browser_embed(self, wb_url, kwargs):
#handle cbrowsers
browser_id = wb_url.mod.split(':', 1)[1]
kwargs['browser_can_write'] = '1' if self.access.can_write_coll(kwargs['collection']) else '0'
kwargs['remote_ip'] = self._get_remote_ip()
kwargs['url'] = wb_url.url
kwargs['timestamp'] = wb_url.timestamp
kwargs['browser'] = browser_id
# container redis info
inject_data = self.browser_mgr.request_new_browser(kwargs)
if 'error_message' in inject_data:
self._raise_error(400, inject_data['error_message'])
inject_data.update(self.get_top_frame_params(wb_url, kwargs))
inject_data['wb_url'] = wb_url
@self.jinja2_view('browser_embed.html')
def browser_embed(data):
return data
return browser_embed(inject_data)
def get_content_inject_info(self, user, collection, recording):
info = {}
# recording
if recording:
info['rec_id'] = recording.my_id
#info['rec_title'] = quote(recording.get_title(), safe='/ ')
info['size'] = recording.size
else:
info['size'] = collection.size
# collection
info['coll_id'] = collection.name
info['coll_title'] = quote(collection.get_prop('title', collection.name), safe='/ ')
info['coll_desc'] = quote(collection.get_prop('desc', ''))
info['size_remaining'] = user.get_size_remaining()
return info
def construct_wburl(self, url, ts, browser, is_content):
prefix = ts or ''
if browser:
prefix += '$br:' + browser
elif is_content:
prefix += 'mp_'
if prefix:
return prefix + '/' + url
else:
return url
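# --- hedged usage sketch (not part of the original controller) --------------
# Illustrates how construct_wburl() above assembles a pywb-style path from a
# timestamp, an optional containerized-browser id and the content-frame flag.
# The URL and browser id are hypothetical; the instance is built with __new__
# only because the method does not touch instance state.
def _construct_wburl_examples():  # pragma: no cover
    ctrl = ContentController.__new__(ContentController)
    assert ctrl.construct_wburl('http://example.com/', '2019', None, True) == \
        '2019mp_/http://example.com/'
    assert ctrl.construct_wburl('http://example.com/', '', 'chrome:73', False) == \
        '$br:chrome:73/http://example.com/'
    assert ctrl.construct_wburl('http://example.com/', '', None, False) == \
        'http://example.com/'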
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_dedicated_mgmt
short_description: Configure dedicated management in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify the system dedicated_mgmt configuration.
      Examples include all parameters; values need to be adjusted to data sources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
            - Ensures the FortiGate certificate is verified by a proper CA.
type: bool
default: true
system_dedicated_mgmt:
description:
- Configure dedicated management.
default: null
type: dict
suboptions:
default_gateway:
description:
- Default gateway for dedicated management interface.
type: str
dhcp_end_ip:
description:
- DHCP end IP for dedicated management.
type: str
dhcp_netmask:
description:
- DHCP netmask.
type: str
dhcp_server:
description:
- Enable/disable DHCP server on management interface.
type: str
choices:
- enable
- disable
dhcp_start_ip:
description:
- DHCP start IP for dedicated management.
type: str
interface:
description:
- Dedicated management interface. Source system.interface.name.
type: str
status:
description:
- Enable/disable dedicated management.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure dedicated management.
fortios_system_dedicated_mgmt:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_dedicated_mgmt:
default_gateway: "<your_own_value>"
dhcp_end_ip: "<your_own_value>"
dhcp_netmask: "<your_own_value>"
dhcp_server: "enable"
dhcp_start_ip: "<your_own_value>"
interface: "<your_own_value> (source system.interface.name)"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_dedicated_mgmt_data(json):
option_list = ['default_gateway', 'dhcp_end_ip', 'dhcp_netmask',
'dhcp_server', 'dhcp_start_ip', 'interface',
'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
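# Hedged illustration (not part of the upstream module): the two helpers above
# shape the request payload in two steps -- None/absent options are dropped
# first, then underscores become hyphens so keys match FortiOS CMDB names.
# The sample values below are hypothetical.
def _example_payload_shaping():  # pragma: no cover
    raw = {'dhcp_server': 'enable', 'default_gateway': None}
    assert filter_system_dedicated_mgmt_data(raw) == {'dhcp_server': 'enable'}
    assert underscore_to_hyphen({'dhcp_server': 'enable'}) == {'dhcp-server': 'enable'}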
def system_dedicated_mgmt(data, fos):
vdom = data['vdom']
system_dedicated_mgmt_data = data['system_dedicated_mgmt']
filtered_data = underscore_to_hyphen(filter_system_dedicated_mgmt_data(system_dedicated_mgmt_data))
return fos.set('system',
'dedicated-mgmt',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_dedicated_mgmt']:
resp = system_dedicated_mgmt(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
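# Hedged illustration (not part of the upstream module): fortios_system()
# returns (is_error, has_changed, raw_response); the response dict below is a
# made-up example of a successful FortiOS reply.
def _example_status_check():  # pragma: no cover
    resp = {'status': 'success', 'http_method': 'PUT', 'http_status': 200}
    assert is_successful_status(resp)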
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"system_dedicated_mgmt": {
"required": False, "type": "dict", "default": None,
"options": {
"default_gateway": {"required": False, "type": "str"},
"dhcp_end_ip": {"required": False, "type": "str"},
"dhcp_netmask": {"required": False, "type": "str"},
"dhcp_server": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dhcp_start_ip": {"required": False, "type": "str"},
"interface": {"required": False, "type": "str"},
"status": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
#coding: utf-8
"""
This module simply sends request to the Digital Ocean API,
and returns their response as a dict.
"""
from requests import codes, RequestException
from dopy import API_TOKEN, API_ENDPOINT
from dopy import common as c
from dopy.exceptions import DoError
REQUEST_METHODS = {
'POST': c.post_request,
'PUT': c.put_request,
'DELETE': c.delete_request,
'GET': c.get_request,
}
class ApiRequest(object):
def __init__(self, uri=None, headers=None, params=None,
timeout=60, method='GET'):
self.set_url(uri)
self.set_headers(headers)
self.params = params
self.timeout = timeout
self.method = method
self.response = None
self._verify_method()
def set_headers(self, headers):
self.headers = {} if not isinstance(headers, dict) else headers
self.headers['Authorization'] = "Bearer %s" % API_TOKEN
def set_url(self, uri):
if uri is None:
uri = '/'
if not uri.startswith('/'):
uri = '/' + uri
self.url = '{}/v2{}'.format(API_ENDPOINT, uri)
def _verify_method(self):
        if self.method not in REQUEST_METHODS:
raise DoError('Unsupported method %s' % self.method)
def _verify_status_code(self):
        if self.response.status_code != codes.ok:
            try:
                body = self.response.json()
            except ValueError:
                # The JSON response is bad, so raise an exception with the HTTP status
                self.response.raise_for_status()
            else:
                if 'error_message' in body:
                    raise DoError(body['error_message'])
                if 'message' in body:
                    raise DoError(body['message'])
                # no API error details in the body, fall back to the HTTP status
                self.response.raise_for_status()
def _verify_response_id(self):
response = self.response.json()
if response.get('id') == 'not_found':
raise DoError(response['message'])
def run(self):
try:
self.response = REQUEST_METHODS[self.method](self.url, self.params, self.headers, self.timeout)
except ValueError:
raise ValueError("The API server doesn't respond with a valid json")
except RequestException as e:
raise RuntimeError(e)
self._verify_status_code()
self._verify_response_id()
return self.response.json()
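# --- hedged usage sketch (not part of the original module) ------------------
# Shows how ApiRequest normalizes the URI (leading slash plus the /v2 prefix)
# and always attaches the bearer token header; no request is actually sent.
def _api_request_example():  # pragma: no cover
    req = ApiRequest('droplets', method='GET')
    assert req.url.endswith('/v2/droplets')
    assert req.headers['Authorization'].startswith('Bearer ')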
class DoApiV2Base(object):
def request(self, path, params={}, method='GET'):
api = ApiRequest(path, params=params, method=method)
return api.run()
@classmethod
def get_endpoint(cls, pathlist=None, trailing_slash=False):
if pathlist is None:
pathlist = []
pathlist.insert(0, cls.endpoint)
if trailing_slash:
pathlist.append('')
return '/'.join(pathlist)
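# Hedged illustration (not part of the original module) of how get_endpoint()
# composes paths from a subclass's 'endpoint' attribute; the droplet id is a
# placeholder and the classes referenced here are defined further below.
def _endpoint_examples():  # pragma: no cover
    assert DoApiDroplets.get_endpoint() == '/droplets'
    assert DoApiDroplets.get_endpoint(['123', 'actions']) == '/droplets/123/actions'
    assert DoApiDomains.get_endpoint(['example.com'], trailing_slash=True) == '/domains/example.com/'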
class DoManager(DoApiV2Base):
def __init__(self):
self.api_endpoint = API_ENDPOINT
def retro_execution(self, method_name, *args, **kwargs):
retrometh = {
"all_active_droplets": DoApiDroplets().list,
"new_droplet": DoApiDroplets().create,
"show_droplet": DoApiDroplets().show_droplet,
"droplet_v2_action": DoApiDroplets().droplet_v2_action,
"reboot_droplet": DoApiDroplets().reboot_droplet,
"power_cycle_droplet": DoApiDroplets().power_cycle_droplet,
"shutdown_droplet": DoApiDroplets().shutdown_droplet,
"power_off_droplet": DoApiDroplets().power_off_droplet,
"power_on_droplet": DoApiDroplets().power_on_droplet,
"password_reset_droplet": DoApiDroplets().password_reset_droplet,
"resize_droplet": DoApiDroplets().resize_droplet,
"snapshot_droplet": DoApiDroplets().snapshot_droplet,
"restore_droplet": DoApiDroplets().restore_droplet,
"rebuild_droplet": DoApiDroplets().rebuild_droplet,
"enable_backups_droplet": DoApiDroplets().enable_backups_droplet,
"disable_backups_droplet": DoApiDroplets().disable_backups_droplet,
"rename_droplet": DoApiDroplets().rename_droplet,
"destroy_droplet": DoApiDroplets().destroy_droplet,
"populate_droplet_ips": DoApiDroplets().populate_droplet_ips,
"all_domains": DoApiDomains().list,
"new_domain": DoApiDomains().create,
"show_domain": DoApiDomains().show,
}
return retrometh[method_name](*args, **kwargs)
# regions==========================================
def all_regions(self):
json = self.request('/regions/')
return json['regions']
# images==========================================
def all_images(self, filter='global'):
params = {'filter': filter}
json = self.request('/images/', params)
return json['images']
def private_images(self):
json = self.request('/images?private=true')
return json['images']
def image_v2_action(self, image_id, image_type, params=None):
if params is None:
params = {}
params['type'] = image_type
json = self.request('/images/%s/actions' % image_id, params=params, method='POST')
return json
def show_image(self, image_id):
json = self.request('/images/%s' % image_id)
return json['image']
def destroy_image(self, image_id):
self.request('/images/%s' % image_id, method='DELETE')
return True
def transfer_image(self, image_id, region_id):
params = {'region': region_id}
json = self.image_v2_action(image_id, 'transfer', params)
json.pop('status', None)
return json
# ssh_keys=========================================
def all_ssh_keys(self):
json = self.request('/account/keys')
return json['ssh_keys']
def new_ssh_key(self, name, pub_key):
params = {'name': name, 'public_key': pub_key}
json = self.request('/account/keys', params, method='POST')
return json['ssh_key']
def show_ssh_key(self, key_id):
json = self.request('/account/keys/%s/' % key_id)
return json['ssh_key']
def edit_ssh_key(self, key_id, name, pub_key):
        # the v2 API doesn't allow changing the key body for now
params = {'name': name}
json = self.request('/account/keys/%s/' % key_id, params, method='PUT')
return json['ssh_key']
def destroy_ssh_key(self, key_id):
self.request('/account/keys/%s' % key_id, method='DELETE')
return True
# sizes============================================
def sizes(self):
json = self.request('/sizes/')
return json['sizes']
# events(actions in v2 API)========================
def show_all_actions(self):
json = self.request('/actions')
return json['actions']
def show_action(self, action_id):
json = self.request('/actions/%s' % action_id)
return json['action']
def show_event(self, event_id):
return self.show_action(event_id)
class DoApiDroplets(DoApiV2Base):
endpoint = '/droplets'
def list(self):
json = self.request(self.get_endpoint(trailing_slash=True))
        for droplet in json['droplets']:
            self.populate_droplet_ips(droplet)
return json['droplets']
def create(self, name, size_id, image_id, region_id,
ssh_key_ids=None, virtio=True, private_networking=False,
backups_enabled=False, user_data=None, ipv6=False):
params = {
'name': str(name),
'size': str(size_id),
'image': str(image_id),
'region': str(region_id),
'virtio': str(virtio).lower(),
'ipv6': str(ipv6).lower(),
'private_networking': str(private_networking).lower(),
'backups': str(backups_enabled).lower(),
}
if ssh_key_ids:
# Need to be an array in v2
if isinstance(ssh_key_ids, basestring):
ssh_key_ids = [ssh_key_ids]
if isinstance(ssh_key_ids, list):
for index in range(len(ssh_key_ids)):
ssh_key_ids[index] = str(ssh_key_ids[index])
params['ssh_keys'] = ssh_key_ids
if user_data:
params['user_data'] = user_data
json = self.request(self.get_endpoint(), params=params, method='POST')
created_id = json['droplet']['id']
json = self.show_droplet(created_id)
return json
def show_droplet(self, droplet_id):
json = self.request(self.get_endpoint([droplet_id]))
self.populate_droplet_ips(json['droplet'])
return json['droplet']
def droplet_v2_action(self, droplet_id, droplet_type, params=None):
if params is None:
params = {}
params['type'] = droplet_type
return self.request(self.get_endpoint([droplet_id, 'actions']), params=params, method='POST')
def reboot_droplet(self, droplet_id):
json = self.droplet_v2_action(droplet_id, 'reboot')
json.pop('status', None)
return json
def power_cycle_droplet(self, droplet_id):
json = self.droplet_v2_action(droplet_id, 'power_cycle')
json.pop('status', None)
return json
def shutdown_droplet(self, droplet_id):
json = self.droplet_v2_action(droplet_id, 'shutdown')
json.pop('status', None)
return json
def power_off_droplet(self, droplet_id):
json = self.droplet_v2_action(droplet_id, 'power_off')
json.pop('status', None)
return json
def power_on_droplet(self, droplet_id):
json = self.droplet_v2_action(droplet_id, 'power_on')
json.pop('status', None)
return json
def password_reset_droplet(self, droplet_id):
json = self.droplet_v2_action(droplet_id, 'password_reset')
json.pop('status', None)
return json
def resize_droplet(self, droplet_id, size_id):
params = {'size': size_id}
json = self.droplet_v2_action(droplet_id, 'resize', params)
json.pop('status', None)
return json
def snapshot_droplet(self, droplet_id, name):
params = {'name': name}
json = self.droplet_v2_action(droplet_id, 'snapshot', params)
json.pop('status', None)
return json
def restore_droplet(self, droplet_id, image_id):
params = {'image': image_id}
json = self.droplet_v2_action(droplet_id, 'restore', params)
json.pop('status', None)
return json
def rebuild_droplet(self, droplet_id, image_id):
params = {'image': image_id}
json = self.droplet_v2_action(droplet_id, 'rebuild', params)
json.pop('status', None)
return json
def enable_backups_droplet(self, droplet_id):
json = self.droplet_v2_action(droplet_id, 'enable_backups')
json.pop('status', None)
return json
def disable_backups_droplet(self, droplet_id):
json = self.droplet_v2_action(droplet_id, 'disable_backups')
json.pop('status', None)
return json
def rename_droplet(self, droplet_id, name):
params = {'name': name}
json = self.droplet_v2_action(droplet_id, 'rename', params)
json.pop('status', None)
return json
def destroy_droplet(self, droplet_id, scrub_data=True):
json = self.request(self.get_endpoint([droplet_id]), method='DELETE')
json.pop('status', None)
return json
def populate_droplet_ips(self, droplet):
droplet[u'ip_address'] = ''
        for network in droplet['networks']['v4']:
if network['type'] == 'public':
droplet[u'ip_address'] = network['ip_address']
if network['type'] == 'private':
droplet[u'private_ip_address'] = network['ip_address']
class DoApiDomains(DoApiV2Base):
endpoint = '/domains'
def list(self):
json = self.request(self.get_endpoint(trailing_slash=True))
return json['domains']
def create(self, name, ip):
json = self.request(self.get_endpoint(), method='POST',
params={'name': name, 'ip_address': ip})
return json['domain']
def show(self, domain_id):
json = self.request(self.get_endpoint([domain_id], trailing_slash=True))
return json['domain']
def destroy_domain(self, domain_id):
self.request(self.get_endpoint([domain_id]), method='DELETE')
# TODO
return True
def all_domain_records(self, domain_id):
json = self.request('/domains/%s/records/' % domain_id)
return json['domain_records']
def new_domain_record(self, domain_id, record_type, data, name=None,
priority=None, port=None, weight=None):
params = {'data': data}
params['type'] = record_type
if name:
params['name'] = name
if priority:
params['priority'] = priority
if port:
params['port'] = port
if weight:
params['weight'] = weight
json = self.request('/domains/%s/records/' % domain_id, params, method='POST')
return json['domain_record']
def show_domain_record(self, domain_id, record_id):
json = self.request('/domains/%s/records/%s' % (domain_id, record_id))
return json['domain_record']
def edit_domain_record(self, domain_id, record_id, record_type, data,
name=None, priority=None, port=None, weight=None):
# API v.2 allows only record name change
params = {'name': name}
json = self.request('/domains/%s/records/%s' % (domain_id, record_id), params, method='PUT')
return json['domain_record']
def destroy_domain_record(self, domain_id, record_id):
self.request('/domains/%s/records/%s' % (domain_id, record_id), method='DELETE')
return True
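# --- hedged usage sketch (not part of the original module) ------------------
# Assumes API_TOKEN and API_ENDPOINT are configured for the dopy package; the
# droplet id and the printed fields below are placeholders.
def _example_manager_usage():  # pragma: no cover
    do = DoManager()
    print(do.all_regions())                    # plain GET helpers on DoManager
    for droplet in DoApiDroplets().list():     # list() also fills ip_address
        print(droplet['id'], droplet['ip_address'])
    # legacy v1-style method names still resolve through retro_execution()
    do.retro_execution('show_droplet', 12345)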
|
|
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cgi
import os
import random
import re
import string
import tempfile
from swift import gettext_ as _
from exceptions import PLOTLIBNotInstalled, ODFLIBNotInstalled,\
NotFoundException, MethodNotAllowed, DataLoadFailure, ProfileException
from profile_model import Stats2
PLOTLIB_INSTALLED = True
try:
import matplotlib
# use agg backend for writing to file, not for rendering in a window.
    # otherwise some platforms will complain "no display name and $DISPLAY
    # environment variable"
matplotlib.use('agg')
import matplotlib.pyplot as plt
except ImportError:
PLOTLIB_INSTALLED = False
empty_description = """
The default profile of current process or the profile you requested is
empty. <input type="submit" name="refresh" value="Refresh"/>
"""
profile_tmpl = """
<select name="profile">
<option value="current">current</option>
<option value="all">all</option>
${profile_list}
</select>
"""
sort_tmpl = """
<select name="sort">
<option value="time">time</option>
<option value="cumulative">cumulative</option>
<option value="calls">calls</option>
<option value="pcalls">pcalls</option>
<option value="name">name</option>
<option value="file">file</option>
<option value="module">module</option>
<option value="line">line</option>
<option value="nfl">nfl</option>
<option value="stdname">stdname</option>
</select>
"""
limit_tmpl = """
<select name="limit">
<option value="-1">all</option>
<option value="0.1">10%</option>
<option value="0.2">20%</option>
<option value="0.3">30%</option>
<option value="10">10</option>
<option value="20">20</option>
<option value="30">30</option>
<option value="50">50</option>
<option value="100">100</option>
<option value="200">200</option>
<option value="300">300</option>
<option value="400">400</option>
<option value="500">500</option>
</select>
"""
fulldirs_tmpl = """
<input type="checkbox" name="fulldirs" value="1"
${fulldir_checked}/>
"""
mode_tmpl = """
<select name="mode">
<option value="stats">stats</option>
<option value="callees">callees</option>
<option value="callers">callers</option>
</select>
"""
nfl_filter_tmpl = """
<input type="text" name="nfl_filter" value="${nfl_filter}"
placeholder="filename part" />
"""
formelements_tmpl = """
<div>
<table>
<tr>
<td>
<strong>Profile</strong>
<td>
<strong>Sort</strong>
</td>
<td>
<strong>Limit</strong>
</td>
<td>
<strong>Full Path</strong>
</td>
<td>
<strong>Filter</strong>
</td>
<td>
</td>
<td>
<strong>Plot Metric</strong>
</td>
<td>
<strong>Plot Type</strong>
<td>
</td>
<td>
<strong>Format</strong>
</td>
<td>
<td>
</td>
<td>
</td>
</tr>
<tr>
<td>
${profile}
<td>
${sort}
</td>
<td>
${limit}
</td>
<td>
${fulldirs}
</td>
<td>
${nfl_filter}
</td>
<td>
<input type="submit" name="query" value="query"/>
</td>
<td>
<select name='metric'>
<option value='nc'>call count</option>
<option value='cc'>primitive call count</option>
<option value='tt'>total time</option>
<option value='ct'>cumulative time</option>
</select>
</td>
<td>
<select name='plottype'>
<option value='bar'>bar</option>
<option value='pie'>pie</option>
</select>
<td>
<input type="submit" name="plot" value="plot"/>
</td>
<td>
<select name='format'>
<option value='default'>binary</option>
<option value='json'>json</option>
<option value='csv'>csv</option>
<option value='ods'>ODF.ods</option>
</select>
</td>
<td>
<input type="submit" name="download" value="download"/>
</td>
<td>
<input type="submit" name="clear" value="clear"/>
</td>
</tr>
</table>
</div>
"""
index_tmpl = """
<html>
<head>
<title>profile results</title>
<style>
<!--
tr.normal { background-color: #ffffff }
tr.hover { background-color: #88eeee }
//-->
</style>
</head>
<body>
<form action="${action}" method="POST">
<div class="form-text">
${description}
</div>
<hr />
${formelements}
</form>
<pre>
${profilehtml}
</pre>
</body>
</html>
"""
class HTMLViewer(object):
format_dict = {'default': 'application/octet-stream',
'json': 'application/json',
'csv': 'text/csv',
'ods': 'application/vnd.oasis.opendocument.spreadsheet',
'python': 'text/html'}
def __init__(self, app_path, profile_module, profile_log):
self.app_path = app_path
self.profile_module = profile_module
self.profile_log = profile_log
def _get_param(self, query_dict, key, default=None, multiple=False):
value = query_dict.get(key, default)
if value is None or value == '':
return default
if multiple:
return value
if isinstance(value, list):
return eval(value[0]) if isinstance(default, int) else value[0]
else:
return value
def render(self, url, method, path_entry, query_dict, clear_callback):
plot = self._get_param(query_dict, 'plot', None)
download = self._get_param(query_dict, 'download', None)
clear = self._get_param(query_dict, 'clear', None)
action = plot or download or clear
profile_id = self._get_param(query_dict, 'profile', 'current')
sort = self._get_param(query_dict, 'sort', 'time')
limit = self._get_param(query_dict, 'limit', -1)
fulldirs = self._get_param(query_dict, 'fulldirs', 0)
nfl_filter = self._get_param(query_dict, 'nfl_filter', '').strip()
metric_selected = self._get_param(query_dict, 'metric', 'cc')
plot_type = self._get_param(query_dict, 'plottype', 'bar')
download_format = self._get_param(query_dict, 'format', 'default')
content = ''
# GET /__profile, POST /__profile
if len(path_entry) == 2 and method in ['GET', 'POST']:
log_files = self.profile_log.get_logfiles(profile_id)
if action == 'plot':
content, headers = self.plot(log_files, sort, limit,
nfl_filter, metric_selected,
plot_type)
elif action == 'download':
content, headers = self.download(log_files, sort, limit,
nfl_filter, download_format)
else:
if action == 'clear':
self.profile_log.clear(profile_id)
clear_callback and clear_callback()
content, headers = self.index_page(log_files, sort, limit,
fulldirs, nfl_filter,
profile_id, url)
# GET /__profile__/all
# GET /__profile__/current
# GET /__profile__/profile_id
# GET /__profile__/profile_id/
# GET /__profile__/profile_id/account.py:50(GETorHEAD)
# GET /__profile__/profile_id/swift/proxy/controllers
# /account.py:50(GETorHEAD)
# with QUERY_STRING: ?format=[default|json|csv|ods]
elif len(path_entry) > 2 and method == 'GET':
profile_id = path_entry[2]
log_files = self.profile_log.get_logfiles(profile_id)
pids = self.profile_log.get_all_pids()
# return all profiles in a json format by default.
# GET /__profile__/
if profile_id == '':
content = '{"profile_ids": ["' + '","'.join(pids) + '"]}'
headers = [('content-type', self.format_dict['json'])]
else:
if len(path_entry) > 3 and path_entry[3] != '':
nfl_filter = '/'.join(path_entry[3:])
if path_entry[-1].find(':0') == -1:
nfl_filter = '/' + nfl_filter
content, headers = self.download(log_files, sort, -1,
nfl_filter, download_format)
headers.append(('Access-Control-Allow-Origin', '*'))
else:
raise MethodNotAllowed(_('method %s is not allowed.') % method)
return content, headers
def index_page(self, log_files=None, sort='time', limit=-1,
fulldirs=0, nfl_filter='', profile_id='current', url='#'):
headers = [('content-type', 'text/html')]
if len(log_files) == 0:
return empty_description, headers
try:
stats = Stats2(*log_files)
except (IOError, ValueError):
raise DataLoadFailure(_('Can not load profile data from %s.')
% log_files)
if not fulldirs:
stats.strip_dirs()
stats.sort_stats(sort)
        nfl_filter_esc = \
            nfl_filter.replace('(', r'\(').replace(')', r'\)')
amount = [nfl_filter_esc, limit] if nfl_filter_esc else [limit]
profile_html = self.generate_stats_html(stats, self.app_path,
profile_id, *amount)
description = "Profiling information is generated by using\
'%s' profiler." % self.profile_module
sort_repl = '<option value="%s">' % sort
sort_selected = '<option value="%s" selected>' % sort
sort = sort_tmpl.replace(sort_repl, sort_selected)
plist = ''.join(['<option value="%s">%s</option>' % (p, p)
for p in self.profile_log.get_all_pids()])
profile_element = string.Template(profile_tmpl).substitute(
{'profile_list': plist})
profile_repl = '<option value="%s">' % profile_id
profile_selected = '<option value="%s" selected>' % profile_id
profile_element = profile_element.replace(profile_repl,
profile_selected)
limit_repl = '<option value="%s">' % limit
limit_selected = '<option value="%s" selected>' % limit
limit = limit_tmpl.replace(limit_repl, limit_selected)
fulldirs_checked = 'checked' if fulldirs else ''
fulldirs_element = string.Template(fulldirs_tmpl).substitute(
{'fulldir_checked': fulldirs_checked})
nfl_filter_element = string.Template(nfl_filter_tmpl).\
substitute({'nfl_filter': nfl_filter})
form_elements = string.Template(formelements_tmpl).substitute(
{'description': description,
'action': url,
'profile': profile_element,
'sort': sort,
'limit': limit,
'fulldirs': fulldirs_element,
'nfl_filter': nfl_filter_element,
}
)
content = string.Template(index_tmpl).substitute(
{'formelements': form_elements,
'action': url,
'description': description,
'profilehtml': profile_html,
})
return content, headers
def download(self, log_files, sort='time', limit=-1, nfl_filter='',
output_format='default'):
if len(log_files) == 0:
raise NotFoundException(_('no log file found'))
try:
            nfl_esc = nfl_filter.replace('(', r'\(').replace(')', r'\)')
# remove the slash that is intentionally added in the URL
# to avoid failure of filtering stats data.
if nfl_esc.startswith('/'):
nfl_esc = nfl_esc[1:]
stats = Stats2(*log_files)
stats.sort_stats(sort)
if output_format == 'python':
data = self.format_source_code(nfl_filter)
elif output_format == 'json':
data = stats.to_json(nfl_esc, limit)
elif output_format == 'csv':
data = stats.to_csv(nfl_esc, limit)
elif output_format == 'ods':
data = stats.to_ods(nfl_esc, limit)
else:
data = stats.print_stats()
return data, [('content-type', self.format_dict[output_format])]
except ODFLIBNotInstalled:
raise
except Exception as ex:
raise ProfileException(_('Data download error: %s') % ex)
def plot(self, log_files, sort='time', limit=10, nfl_filter='',
metric_selected='cc', plot_type='bar'):
if not PLOTLIB_INSTALLED:
raise PLOTLIBNotInstalled(_('python-matplotlib not installed.'))
if len(log_files) == 0:
raise NotFoundException(_('no log file found'))
try:
stats = Stats2(*log_files)
stats.sort_stats(sort)
stats_dict = stats.stats
__, func_list = stats.get_print_list([nfl_filter, limit])
nfls = []
performance = []
names = {'nc': 'Total Call Count', 'cc': 'Primitive Call Count',
'tt': 'Total Time', 'ct': 'Cumulative Time'}
for func in func_list:
cc, nc, tt, ct, __ = stats_dict[func]
metric = {'cc': cc, 'nc': nc, 'tt': tt, 'ct': ct}
nfls.append(func[2])
performance.append(metric[metric_selected])
y_pos = range(len(nfls))
error = [random.random() for __ in y_pos]
plt.clf()
if plot_type == 'pie':
plt.pie(x=performance, explode=None, labels=nfls,
autopct='%1.1f%%')
else:
plt.barh(y_pos, performance, xerr=error, align='center',
alpha=0.4)
plt.yticks(y_pos, nfls)
plt.xlabel(names[metric_selected])
plt.title('Profile Statistics (by %s)' % names[metric_selected])
# plt.gcf().tight_layout(pad=1.2)
with tempfile.TemporaryFile() as profile_img:
plt.savefig(profile_img, format='png', dpi=300)
profile_img.seek(0)
data = profile_img.read()
            return data, [('content-type', 'image/png')]
except Exception as ex:
raise ProfileException(_('plotting results failed due to %s') % ex)
def format_source_code(self, nfl):
nfls = re.split('[:()]', nfl)
file_path = nfls[0]
try:
lineno = int(nfls[1])
except (TypeError, ValueError, IndexError):
lineno = 0
        # For security reasons, this check still needs to be hardened.
        if not file_path.endswith('.py'):
            return _('This file type is forbidden to access!')
try:
data = []
i = 0
with open(file_path) as f:
lines = f.readlines()
max_width = str(len(str(len(lines))))
fmt = '<span id="L%d" rel="#L%d">%' + max_width\
+ 'd|<code>%s</code></span>'
for line in lines:
l = cgi.escape(line, quote=None)
i = i + 1
if i == lineno:
fmt2 = '<span id="L%d" style="background-color: \
rgb(127,255,127)">%' + max_width +\
'd|<code>%s</code></span>'
data.append(fmt2 % (i, i, l))
else:
data.append(fmt % (i, i, i, l))
data = ''.join(data)
except Exception:
return _('Can not access the file %s.') % file_path
return '<pre>%s</pre>' % data
def generate_stats_html(self, stats, app_path, profile_id, *selection):
html = []
for filename in stats.files:
html.append('<p>%s</p>' % filename)
try:
for func in stats.top_level:
html.append('<p>%s</p>' % func[2])
html.append('%s function calls' % stats.total_calls)
if stats.total_calls != stats.prim_calls:
html.append("(%d primitive calls)" % stats.prim_calls)
html.append('in %.3f seconds' % stats.total_tt)
if stats.fcn_list:
stat_list = stats.fcn_list[:]
msg = "<p>Ordered by: %s</p>" % stats.sort_type
else:
stat_list = stats.stats.keys()
msg = '<p>Random listing order was used</p>'
for sel in selection:
stat_list, msg = stats.eval_print_amount(sel, stat_list, msg)
html.append(msg)
html.append('<table style="border-width: 1px">')
if stat_list:
html.append('<tr><th>#</th><th>Call Count</th>\
<th>Total Time</th><th>Time/Call</th>\
<th>Cumulative Time</th>\
<th>Cumulative Time/Call</th>\
<th>Filename:Lineno(Function)</th>\
<th>JSON</th>\
</tr>')
count = 0
for func in stat_list:
count = count + 1
html.append('<tr onMouseOver="this.className=\'hover\'"\
onMouseOut="this.className=\'normal\'">\
<td>%d)</td>' % count)
cc, nc, tt, ct, __ = stats.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
html.append('<td>%s</td>' % c)
html.append('<td>%f</td>' % tt)
if nc == 0:
html.append('<td>-</td>')
else:
html.append('<td>%f</td>' % (float(tt) / nc))
html.append('<td>%f</td>' % ct)
if cc == 0:
html.append('<td>-</td>')
else:
html.append('<td>%f</td>' % (float(ct) / cc))
nfls = cgi.escape(stats.func_std_string(func))
if nfls.split(':')[0] not in ['', 'profile'] and\
os.path.isfile(nfls.split(':')[0]):
html.append('<td><a href="%s/%s%s?format=python#L%d">\
%s</a></td>' % (app_path, profile_id,
nfls, func[1], nfls))
else:
html.append('<td>%s</td>' % nfls)
if not nfls.startswith('/'):
nfls = '/' + nfls
html.append('<td><a href="%s/%s%s?format=json">\
--></a></td></tr>' % (app_path,
profile_id, nfls))
except Exception as ex:
html.append("Exception:" % ex.message)
return ''.join(html)
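# --- hedged usage sketch (not part of the original middleware) --------------
# Illustrates how the profiling middleware is expected to drive render():
# path_entry is the split request path and query_dict the parsed parameters
# (values arrive as lists). The viewer argument and URL are placeholders.
def _example_index_request(viewer):  # pragma: no cover
    content, headers = viewer.render(
        url='/__profile__',                  # form action for the index page
        method='GET',
        path_entry=['', '__profile__'],      # GET /__profile__ -> index page
        query_dict={'sort': ['cumulative'], 'limit': ['20']},
        clear_callback=None)
    return content, headers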
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._scope_maps_operations import build_create_request_initial, build_delete_request_initial, build_get_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ScopeMapsOperations:
"""ScopeMapsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2021_12_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ScopeMapListResult"]:
"""Lists all the scope maps for the specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ScopeMapListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2021_12_01_preview.models.ScopeMapListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ScopeMapListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ScopeMapListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
registry_name: str,
scope_map_name: str,
**kwargs: Any
) -> "_models.ScopeMap":
"""Gets the properties of the specified scope map.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param scope_map_name: The name of the scope map.
:type scope_map_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ScopeMap, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2021_12_01_preview.models.ScopeMap
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ScopeMap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
scope_map_name=scope_map_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ScopeMap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps/{scopeMapName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
registry_name: str,
scope_map_name: str,
scope_map_create_parameters: "_models.ScopeMap",
**kwargs: Any
) -> "_models.ScopeMap":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ScopeMap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(scope_map_create_parameters, 'ScopeMap')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
scope_map_name=scope_map_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ScopeMap', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ScopeMap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps/{scopeMapName}'} # type: ignore
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
scope_map_name: str,
scope_map_create_parameters: "_models.ScopeMap",
**kwargs: Any
) -> AsyncLROPoller["_models.ScopeMap"]:
"""Creates a scope map for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param scope_map_name: The name of the scope map.
:type scope_map_name: str
:param scope_map_create_parameters: The parameters for creating a scope map.
:type scope_map_create_parameters:
~azure.mgmt.containerregistry.v2021_12_01_preview.models.ScopeMap
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ScopeMap or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.ScopeMap]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ScopeMap"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
scope_map_name=scope_map_name,
scope_map_create_parameters=scope_map_create_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ScopeMap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps/{scopeMapName}'} # type: ignore
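    # Hedged usage sketch (not part of the generated client): callers normally
    # await the poller returned by begin_create and then its result, e.g.
    #
    #     poller = await client.scope_maps.begin_create(
    #         resource_group_name="my-rg",          # hypothetical values
    #         registry_name="myregistry",
    #         scope_map_name="my-scope-map",
    #         scope_map_create_parameters=scope_map)
    #     scope_map = await poller.result()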
async def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
scope_map_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
scope_map_name=scope_map_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps/{scopeMapName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
registry_name: str,
scope_map_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a scope map from a container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param scope_map_name: The name of the scope map.
:type scope_map_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
scope_map_name=scope_map_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps/{scopeMapName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
registry_name: str,
scope_map_name: str,
scope_map_update_parameters: "_models.ScopeMapUpdateParameters",
**kwargs: Any
) -> "_models.ScopeMap":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ScopeMap"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(scope_map_update_parameters, 'ScopeMapUpdateParameters')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
scope_map_name=scope_map_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ScopeMap', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ScopeMap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps/{scopeMapName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
registry_name: str,
scope_map_name: str,
scope_map_update_parameters: "_models.ScopeMapUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.ScopeMap"]:
"""Updates a scope map with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param scope_map_name: The name of the scope map.
:type scope_map_name: str
:param scope_map_update_parameters: The parameters for updating a scope map.
:type scope_map_update_parameters:
~azure.mgmt.containerregistry.v2021_12_01_preview.models.ScopeMapUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ScopeMap or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.ScopeMap]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ScopeMap"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
scope_map_name=scope_map_name,
scope_map_update_parameters=scope_map_update_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ScopeMap', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/scopeMaps/{scopeMapName}'} # type: ignore
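# --- Hedged usage sketch (not part of the generated SDK code above) ----------------
# Shows how a caller would typically drive the begin_update/begin_delete pollers
# defined in this operations class.  The management-client attribute name
# (`scope_maps`), the resource names, and the update field below are illustrative
# assumptions, not values taken from the source.
from azure.mgmt.containerregistry.v2021_12_01_preview.models import ScopeMapUpdateParameters

async def example_update_then_delete(client):
    # `client` is assumed to be an authenticated async ContainerRegistryManagementClient.
    update_poller = await client.scope_maps.begin_update(
        resource_group_name="my-rg",                  # assumed resource group
        registry_name="myregistry",                   # assumed registry
        scope_map_name="my-scope-map",                # assumed scope map
        scope_map_update_parameters=ScopeMapUpdateParameters(
            description="updated by example"),        # assumed field value
    )
    scope_map = await update_poller.result()          # waits for the LRO to finish

    delete_poller = await client.scope_maps.begin_delete(
        resource_group_name="my-rg",
        registry_name="myregistry",
        scope_map_name="my-scope-map",
    )
    await delete_poller.result()                      # returns None on success
    return scope_map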
|
|
"""
Implement slices and various slice computations.
"""
import itertools
from llvmlite import ir
from numba.six.moves import zip_longest
from numba import cgutils, types, typing
from .imputils import (lower_builtin, lower_getattr,
iternext_impl, impl_ret_borrowed,
impl_ret_new_ref, impl_ret_untracked)
def fix_index(builder, idx, size):
"""
Fix negative index by adding *size* to it. Positive
indices are left untouched.
"""
is_negative = builder.icmp_signed('<', idx, ir.Constant(size.type, 0))
wrapped_index = builder.add(idx, size)
return builder.select(is_negative, wrapped_index, idx)
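# Hedged pure-Python sketch of the semantics fix_index() implements at the LLVM IR
# level: a negative index is wrapped once by adding the sequence size, positive
# indices pass through unchanged (no bounds check, mirroring the IR above).
def _py_fix_index(idx, size):
    return idx + size if idx < 0 else idx

assert _py_fix_index(-1, 5) == 4
assert _py_fix_index(2, 5) == 2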
def fix_slice(builder, slice, size):
"""
Fix *slice* start and stop to be valid (inclusive and exclusive, resp)
indexing bounds for a sequence of the given *size*.
"""
# See PySlice_GetIndicesEx()
zero = ir.Constant(size.type, 0)
minus_one = ir.Constant(size.type, -1)
def fix_bound(bound_name, lower_repl, upper_repl):
bound = getattr(slice, bound_name)
bound = fix_index(builder, bound, size)
# Store value
setattr(slice, bound_name, bound)
# Still negative? => clamp to lower_repl
underflow = builder.icmp_signed('<', bound, zero)
with builder.if_then(underflow, likely=False):
setattr(slice, bound_name, lower_repl)
# Greater than size? => clamp to upper_repl
overflow = builder.icmp_signed('>=', bound, size)
with builder.if_then(overflow, likely=False):
setattr(slice, bound_name, upper_repl)
with builder.if_else(cgutils.is_neg_int(builder, slice.step)) as (if_neg_step, if_pos_step):
with if_pos_step:
# < 0 => 0; >= size => size
fix_bound('start', zero, size)
fix_bound('stop', zero, size)
with if_neg_step:
# < 0 => -1; >= size => size - 1
lower = minus_one
upper = builder.add(size, minus_one)
fix_bound('start', lower, upper)
fix_bound('stop', lower, upper)
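# Hedged reference sketch: CPython exposes the same clamping logic through
# slice.indices(), which is what PySlice_GetIndicesEx() implements.  This is only a
# cross-check of the behaviour fix_slice() reproduces at the IR level.
assert slice(-3, None, 1).indices(5) == (2, 5, 1)       # negative start wrapped, stop clamped
assert slice(None, None, -1).indices(5) == (4, -1, -1)  # negative-step defaults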
def get_slice_length(builder, slicestruct):
"""
Given a slice, compute the number of indices it spans, i.e. the
number of iterations that for_range_slice() will execute.
Pseudo-code:
assert step != 0
if step > 0:
if stop <= start:
return 0
else:
return (stop - start - 1) // step + 1
else:
if stop >= start:
return 0
else:
return (stop - start + 1) // step + 1
(see PySlice_GetIndicesEx() in CPython)
"""
start = slicestruct.start
stop = slicestruct.stop
step = slicestruct.step
one = ir.Constant(start.type, 1)
zero = ir.Constant(start.type, 0)
is_step_negative = cgutils.is_neg_int(builder, step)
delta = builder.sub(stop, start)
# Nominal case
pos_dividend = builder.sub(delta, one)
neg_dividend = builder.add(delta, one)
dividend = builder.select(is_step_negative, neg_dividend, pos_dividend)
nominal_length = builder.add(one, builder.sdiv(dividend, step))
# Catch zero length
is_zero_length = builder.select(is_step_negative,
builder.icmp_signed('>=', delta, zero),
builder.icmp_signed('<=', delta, zero))
# Clamp to 0 if is_zero_length
return builder.select(is_zero_length, zero, nominal_length)
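# Hedged pure-Python mirror of the pseudo-code in the docstring above, cross-checked
# against len(range(...)), which uses the same formula as PySlice_GetIndicesEx().
def _py_slice_length(start, stop, step):
    assert step != 0
    if step > 0:
        return 0 if stop <= start else (stop - start - 1) // step + 1
    else:
        return 0 if stop >= start else (stop - start + 1) // step + 1

for _start, _stop, _step in [(0, 10, 3), (10, 0, -3), (5, 5, 1), (0, 10, -1)]:
    assert _py_slice_length(_start, _stop, _step) == len(range(_start, _stop, _step))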
def get_slice_bounds(builder, slicestruct):
"""
Return the [lower, upper) indexing bounds of a slice.
"""
start = slicestruct.start
stop = slicestruct.stop
zero = start.type(0)
one = start.type(1)
# This is a bit pessimal, e.g. it will return [1, 5) instead
# of [1, 4) for `1:5:2`
is_step_negative = builder.icmp_signed('<', slicestruct.step, zero)
lower = builder.select(is_step_negative,
builder.add(stop, one), start)
upper = builder.select(is_step_negative,
builder.add(start, one), stop)
return lower, upper
def fix_stride(builder, slice, stride):
"""
Fix the given stride for the slice's step.
"""
return builder.mul(slice.step, stride)
def guard_invalid_slice(context, builder, typ, slicestruct):
"""
Guard against *slicestruct* having a zero step (and raise ValueError).
"""
if typ.has_step:
cgutils.guard_null(context, builder, slicestruct.step,
(ValueError, "slice step cannot be zero"))
def get_defaults(context):
"""
Get the default values for a slice's members:
(start for positive step, start for negative step,
stop for positive step, stop for negative step, step)
"""
maxint = (1 << (context.address_size - 1)) - 1
return (0, maxint, maxint, - maxint - 1, 1)
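# Hedged sketch: on a 64-bit target (an assumption here), the sentinels returned by
# get_defaults() coincide with CPython's defaults for omitted slice bounds, i.e.
# sys.maxsize / -sys.maxsize - 1 for the unbounded stop/start values and 1 for step.
import sys
_maxint_64 = (1 << (64 - 1)) - 1
assert (0, _maxint_64, _maxint_64, -_maxint_64 - 1, 1) == \
       (0, sys.maxsize, sys.maxsize, -sys.maxsize - 1, 1)   # holds on 64-bit CPython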
#---------------------------------------------------------------------------
# The slice structure
@lower_builtin(slice, types.VarArg(types.Any))
def slice_constructor_impl(context, builder, sig, args):
default_start_pos, default_start_neg, default_stop_pos, default_stop_neg, default_step = \
[context.get_constant(types.intp, x) for x in get_defaults(context)]
# Fetch non-None arguments
slice_args = [None] * 3
for i, (ty, val) in enumerate(zip(sig.args, args)):
if ty is types.none:
slice_args[i] = None
else:
slice_args[i] = val
# Fill omitted arguments
def get_arg_value(i, default):
val = slice_args[i]
if val is None:
return default
else:
return val
step = get_arg_value(2, default_step)
is_step_negative = builder.icmp_signed('<', step,
context.get_constant(types.intp, 0))
default_stop = builder.select(is_step_negative,
default_stop_neg, default_stop_pos)
default_start = builder.select(is_step_negative,
default_start_neg, default_start_pos)
stop = get_arg_value(1, default_stop)
start = get_arg_value(0, default_start)
ty = sig.return_type
sli = context.make_helper(builder, sig.return_type)
sli.start = start
sli.stop = stop
sli.step = step
res = sli._getvalue()
return impl_ret_untracked(context, builder, sig.return_type, res)
@lower_getattr(types.SliceType, "start")
def slice_start_impl(context, builder, typ, value):
sli = context.make_helper(builder, typ, value)
return sli.start
@lower_getattr(types.SliceType, "stop")
def slice_stop_impl(context, builder, typ, value):
sli = context.make_helper(builder, typ, value)
return sli.stop
@lower_getattr(types.SliceType, "step")
def slice_step_impl(context, builder, typ, value):
if typ.has_step:
sli = context.make_helper(builder, typ, value)
return sli.step
else:
return context.get_constant(types.intp, 1)
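# Hedged end-to-end sketch: the lowering code above is what makes ordinary slice
# expressions work inside nopython-mode functions.  Guarded so the sketch is a no-op
# when numba/numpy are not importable in the current environment (an assumption).
try:
    import numpy as np
    from numba import njit

    @njit
    def _take_slice(a, lo, hi, step):
        return a[lo:hi:step]

    assert list(_take_slice(np.arange(10), 1, 8, 2)) == [1, 3, 5, 7]
except ImportError:
    pass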
|
|
"""
Type inference based on CPA.
The algorithm guarantees monotonic growth of type-sets for each variable.
Steps:
1. seed initial types
2. build constraints
3. propagate constraints
4. unify types
Constraint propagation is precise and does not regret (no backtracking).
Constraints push types forward following the dataflow.
"""
from __future__ import print_function, division, absolute_import
from pprint import pprint
import itertools
import traceback
from numba import ir, types, utils, config, six
from .errors import TypingError
class TypeVar(object):
def __init__(self, context, var):
self.context = context
self.var = var
self.type = None
self.locked = False
def add_type(self, tp):
assert isinstance(tp, types.Type), type(tp)
if self.locked:
if tp != self.type:
if self.context.can_convert(tp, self.type) is None:
raise TypingError("No conversion from %s to %s for "
"'%s'" % (tp, self.type, self.var))
else:
if self.type is not None:
unified = self.context.unify_pairs(self.type, tp)
if unified is types.pyobject:
raise TypingError("cannot unify %s and %s for '%s'"
% (self.type, tp, self.var))
else:
unified = tp
self.type = unified
return self.type
def lock(self, tp):
assert isinstance(tp, types.Type), type(tp)
assert not self.locked
# If there is already a type, ensure we can convert it to the
# locked type.
if (self.type is not None and
self.context.can_convert(self.type, tp) is None):
raise TypingError("No conversion from %s to %s for "
"'%s'" % (tp, self.type, self.var))
self.type = tp
self.locked = True
def union(self, other):
if other.type is not None:
self.add_type(other.type)
return self.type
def __repr__(self):
return '%s := %s' % (self.var, self.type)
@property
def defined(self):
return self.type is not None
def get(self):
return (self.type,) if self.type is not None else ()
def getone(self):
assert self.type is not None
return self.type
def __len__(self):
return 1 if self.type is not None else 0
class ConstraintNetwork(object):
"""
TODO: It is possible to optimize constraint propagation to consider only
dirty type variables.
"""
def __init__(self):
self.constraints = []
def append(self, constraint):
self.constraints.append(constraint)
def propagate(self, typeinfer):
"""
Execute all constraints. Errors are caught and returned as a list.
This allows progressing even though some constraints may fail
due to lack of information (e.g. imprecise types such as List(undefined)).
"""
errors = []
for constraint in self.constraints:
try:
constraint(typeinfer)
except TypingError as e:
errors.append(e)
except Exception:
msg = "Internal error at {con}:\n{sep}\n{err}{sep}\n"
e = TypingError(msg.format(con=constraint,
err=traceback.format_exc(),
sep='--%<' +'-' * 65),
loc=constraint.loc)
errors.append(e)
return errors
class Propagate(object):
"""
A simple constraint for direct propagation of types for assignments.
"""
def __init__(self, dst, src, loc):
self.dst = dst
self.src = src
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
typeinfer.copy_type(self.src, self.dst)
# If `dst` is refined, notify us
typeinfer.refine_map[self.dst] = self
def refine(self, typeinfer, target_type):
# Do not back-propagate to locked variables (e.g. constants)
typeinfer.add_type(self.src, target_type, unless_locked=True)
class BuildTupleConstraint(object):
def __init__(self, target, items, loc):
self.target = target
self.items = items
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
tsets = [typevars[i.name].get() for i in self.items]
oset = typevars[self.target]
for vals in itertools.product(*tsets):
if vals and all(vals[0] == v for v in vals):
tup = types.UniTuple(dtype=vals[0], count=len(vals))
else:
# empty tuples fall here as well
tup = types.Tuple(vals)
typeinfer.add_type(self.target, tup)
class BuildListConstraint(object):
def __init__(self, target, items, loc):
self.target = target
self.items = items
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
oset = typevars[self.target]
tsets = [typevars[i.name].get() for i in self.items]
if not tsets:
typeinfer.add_type(self.target, types.List(types.undefined))
else:
for typs in itertools.product(*tsets):
unified = typeinfer.context.unify_types(*typs)
typeinfer.add_type(self.target, types.List(unified))
class ExhaustIterConstraint(object):
def __init__(self, target, count, iterator, loc):
self.target = target
self.count = count
self.iterator = iterator
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.iterator.name].get():
if isinstance(tp, types.BaseTuple):
if len(tp) == self.count:
typeinfer.add_type(self.target, tp)
else:
raise ValueError("wrong tuple length for %r: "
"expected %d, got %d"
% (self.iterator.name, self.count, len(tp)))
elif isinstance(tp, types.IterableType):
tup = types.UniTuple(dtype=tp.iterator_type.yield_type,
count=self.count)
typeinfer.add_type(self.target, tup)
class PairFirstConstraint(object):
def __init__(self, target, pair, loc):
self.target = target
self.pair = pair
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.pair.name].get():
if not isinstance(tp, types.Pair):
# XXX is this an error?
continue
typeinfer.add_type(self.target, tp.first_type)
class PairSecondConstraint(object):
def __init__(self, target, pair, loc):
self.target = target
self.pair = pair
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.pair.name].get():
if not isinstance(tp, types.Pair):
# XXX is this an error?
continue
typeinfer.add_type(self.target, tp.second_type)
class StaticGetItemConstraint(object):
def __init__(self, target, value, index, loc):
self.target = target
self.value = value
self.index = index
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
oset = typevars[self.target]
for tp in typevars[self.value.name].get():
if isinstance(tp, types.BaseTuple):
typeinfer.add_type(self.target, tp.types[self.index])
class CallConstraint(object):
"""Constraint for calling functions.
    Perform case analysis for each combination of argument types.
"""
signature = None
def __init__(self, target, func, args, kws, vararg, loc):
self.target = target
self.func = func
self.args = args
self.kws = kws or {}
self.vararg = vararg
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
fnty = typevars[self.func].getone()
self.resolve(typeinfer, typevars, fnty)
def resolve(self, typeinfer, typevars, fnty):
assert fnty
context = typeinfer.context
n_pos_args = len(self.args)
kwds = [kw for (kw, var) in self.kws]
argtypes = [typevars[a.name] for a in self.args]
argtypes += [typevars[var.name] for (kw, var) in self.kws]
if self.vararg is not None:
argtypes.append(typevars[self.vararg.name])
        if not all(a.defined for a in argtypes):
# Cannot resolve call type until all argument types are known
return
args = tuple(a.getone() for a in argtypes)
pos_args = args[:n_pos_args]
if self.vararg is not None:
if not isinstance(args[-1], types.BaseTuple):
# Unsuitable for *args
# (Python is more lenient and accepts all iterables)
return
pos_args += args[-1].types
args = args[:-1]
kw_args = dict(zip(kwds, args[n_pos_args:]))
sig = context.resolve_function_type(fnty, pos_args, kw_args)
if sig is None:
desc = context.explain_function_type(fnty)
headtemp = "Invalid usage of {0} with parameters ({1})"
head = headtemp.format(fnty, ', '.join(map(str, args)))
msg = '\n'.join([head, desc])
raise TypingError(msg, loc=self.loc)
typeinfer.add_type(self.target, sig.return_type)
# If the function is a bound function and its receiver type
# was refined, propagate it.
if (isinstance(fnty, types.BoundFunction)
and sig.recvr is not None
and sig.recvr != fnty.this):
refined_this = context.unify_pairs(sig.recvr, fnty.this)
if refined_this.is_precise():
refined_fnty = types.BoundFunction(fnty.template,
this=refined_this)
typeinfer.propagate_refined_type(self.func, refined_fnty)
self.signature = sig
def get_call_signature(self):
return self.signature
class IntrinsicCallConstraint(CallConstraint):
def __call__(self, typeinfer):
self.resolve(typeinfer, typeinfer.typevars, fnty=self.func)
class GetAttrConstraint(object):
def __init__(self, target, attr, value, loc, inst):
self.target = target
self.attr = attr
self.value = value
self.loc = loc
self.inst = inst
def __call__(self, typeinfer):
typevars = typeinfer.typevars
valtys = typevars[self.value.name].get()
for ty in valtys:
try:
attrty = typeinfer.context.resolve_getattr(value=ty, attr=self.attr)
except KeyError:
args = (self.attr, ty, self.value.name, self.inst)
msg = "Unknown attribute '%s' for %s %s %s" % args
raise TypingError(msg, loc=self.inst.loc)
else:
typeinfer.add_type(self.target, attrty)
typeinfer.refine_map[self.target] = self
def refine(self, typeinfer, target_type):
if isinstance(target_type, types.BoundFunction):
recvr = target_type.this
typeinfer.add_type(self.value.name, recvr)
source_constraint = typeinfer.refine_map.get(self.value.name)
if source_constraint is not None:
source_constraint.refine(typeinfer, recvr)
def __repr__(self):
return 'resolving type of attribute "{attr}" of "{value}"'.format(
value=self.value, attr=self.attr)
class SetItemConstraint(object):
def __init__(self, target, index, value, loc):
self.target = target
self.index = index
self.value = value
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
targettys = typevars[self.target.name].get()
idxtys = typevars[self.index.name].get()
valtys = typevars[self.value.name].get()
for ty, it, vt in itertools.product(targettys, idxtys, valtys):
if not typeinfer.context.resolve_setitem(target=ty,
index=it, value=vt):
raise TypingError("Cannot resolve setitem: %s[%s] = %s" %
(ty, it, vt), loc=self.loc)
class DelItemConstraint(object):
def __init__(self, target, index, loc):
self.target = target
self.index = index
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
targettys = typevars[self.target.name].get()
idxtys = typevars[self.index.name].get()
for ty, it in itertools.product(targettys, idxtys):
if not typeinfer.context.resolve_delitem(target=ty, index=it):
raise TypingError("Cannot resolve delitem: %s[%s]" %
(ty, it), loc=self.loc)
class SetAttrConstraint(object):
def __init__(self, target, attr, value, loc):
self.target = target
self.attr = attr
self.value = value
self.loc = loc
def __call__(self, typeinfer):
typevars = typeinfer.typevars
targettys = typevars[self.target.name].get()
valtys = typevars[self.value.name].get()
for ty, vt in itertools.product(targettys, valtys):
if not typeinfer.context.resolve_setattr(target=ty,
attr=self.attr,
value=vt):
raise TypingError("Cannot resolve setattr: (%s).%s = %s" %
(ty, self.attr, vt), loc=self.loc)
class TypeVarMap(dict):
def set_context(self, context):
self.context = context
def __getitem__(self, name):
if name not in self:
self[name] = TypeVar(self.context, name)
return super(TypeVarMap, self).__getitem__(name)
def __setitem__(self, name, value):
assert isinstance(name, str)
if name in self:
raise KeyError("Cannot redefine typevar %s" % name)
else:
super(TypeVarMap, self).__setitem__(name, value)
class TypeInferer(object):
"""
    Operates on blocks that share the same ir.Scope.
"""
def __init__(self, context, interp):
self.context = context
self.blocks = interp.blocks
self.generator_info = interp.generator_info
self.py_func = interp.bytecode.func
self.typevars = TypeVarMap()
self.typevars.set_context(context)
self.constraints = ConstraintNetwork()
# { index: mangled name }
self.arg_names = {}
self.return_type = None
# Set of assumed immutable globals
self.assumed_immutables = set()
# Track all calls
self.usercalls = []
self.intrcalls = []
self.delitemcalls = []
self.setitemcalls = []
self.setattrcalls = []
# Target var -> constraint with refine hook
self.refine_map = {}
if config.DEBUG or config.DEBUG_TYPEINFER:
self.debug = TypeInferDebug(self)
else:
self.debug = NullDebug()
def _mangle_arg_name(self, name):
        # Disambiguate the argument name
return "arg.%s" % (name,)
def seed_argument(self, name, index, typ):
name = self._mangle_arg_name(name)
self.seed_type(name, typ)
self.arg_names[index] = name
def seed_type(self, name, typ):
"""All arguments should be seeded.
"""
self.lock_type(name, typ)
def seed_return(self, typ):
"""Seeding of return value is optional.
"""
for blk in utils.itervalues(self.blocks):
inst = blk.terminator
if isinstance(inst, ir.Return):
self.lock_type(inst.value.name, typ)
def build_constraint(self):
for blk in utils.itervalues(self.blocks):
for inst in blk.body:
self.constrain_statement(inst)
def propagate(self):
newtoken = self.get_state_token()
oldtoken = None
# Since the number of types are finite, the typesets will eventually
# stop growing.
while newtoken != oldtoken:
self.debug.propagate_started()
oldtoken = newtoken
# Errors can appear when the type set is incomplete; only
# raise them when there is no progress anymore.
errors = self.constraints.propagate(self)
newtoken = self.get_state_token()
self.debug.propagate_finished()
if errors:
raise errors[0]
def add_type(self, var, tp, unless_locked=False):
assert isinstance(var, str), type(var)
tv = self.typevars[var]
if unless_locked and tv.locked:
return
unified = tv.add_type(tp)
self.propagate_refined_type(var, unified)
def copy_type(self, src_var, dest_var):
unified = self.typevars[dest_var].union(self.typevars[src_var])
def lock_type(self, var, tp):
tv = self.typevars[var]
tv.lock(tp)
def propagate_refined_type(self, updated_var, updated_type):
source_constraint = self.refine_map.get(updated_var)
if source_constraint is not None:
source_constraint.refine(self, updated_type)
def unify(self):
"""
Run the final unification pass over all inferred types, and
catch imprecise types.
"""
typdict = utils.UniqueDict()
        def check_var(name):
            tv = self.typevars[name]
            if not tv.defined:
                raise TypingError("Undefined variable '%s'" % (name,))
            tp = tv.getone()
            if not tp.is_precise():
                raise TypingError("Can't infer type of variable '%s': %s" % (name, tp))
            typdict[name] = tp
# For better error display, check first user-visible vars, then
# temporaries
temps = set(k for k in self.typevars if not k[0].isalpha())
others = set(self.typevars) - temps
for var in sorted(others):
check_var(var)
for var in sorted(temps):
check_var(var)
retty = self.get_return_type(typdict)
fntys = self.get_function_types(typdict)
if self.generator_info:
retty = self.get_generator_type(typdict, retty)
self.debug.unify_finished(typdict, retty, fntys)
return typdict, retty, fntys
def get_generator_type(self, typdict, retty):
gi = self.generator_info
arg_types = [None] * len(self.arg_names)
for index, name in self.arg_names.items():
arg_types[index] = typdict[name]
state_types = [typdict[var_name] for var_name in gi.state_vars]
yield_types = [typdict[y.inst.value.name] for y in gi.get_yield_points()]
if not yield_types:
raise TypingError("Cannot type generator: it does not yield any value")
yield_type = self.context.unify_types(*yield_types)
return types.Generator(self.py_func, yield_type, arg_types, state_types,
has_finalizer=True)
def get_function_types(self, typemap):
"""
Fill and return a calltypes map using the inferred `typemap`.
"""
# XXX why can't this be done on the fly?
calltypes = utils.UniqueDict()
for call, constraint in self.intrcalls:
calltypes[call] = constraint.get_call_signature()
for call, args, kws, vararg in self.usercalls:
if isinstance(call.func, ir.Intrinsic):
signature = call.func.type
else:
fnty = typemap[call.func.name]
args = tuple(typemap[a.name] for a in args)
kws = dict((kw, typemap[var.name]) for (kw, var) in kws)
if vararg is not None:
tp = typemap[vararg.name]
assert isinstance(tp, types.BaseTuple)
args = args + tp.types
signature = self.context.resolve_function_type(fnty, args, kws)
assert signature is not None, (fnty, args, kws, vararg)
calltypes[call] = signature
for inst in self.delitemcalls:
target = typemap[inst.target.name]
index = typemap[inst.index.name]
signature = self.context.resolve_delitem(target, index)
calltypes[inst] = signature
for inst in self.setitemcalls:
target = typemap[inst.target.name]
index = typemap[inst.index.name]
value = typemap[inst.value.name]
signature = self.context.resolve_setitem(target, index, value)
calltypes[inst] = signature
for inst in self.setattrcalls:
target = typemap[inst.target.name]
attr = inst.attr
value = typemap[inst.value.name]
signature = self.context.resolve_setattr(target, attr, value)
calltypes[inst] = signature
return calltypes
def get_return_type(self, typemap):
rettypes = set()
for blk in utils.itervalues(self.blocks):
term = blk.terminator
if isinstance(term, ir.Return):
rettypes.add(typemap[term.value.name])
if rettypes:
unified = self.context.unify_types(*rettypes)
if not unified.is_precise():
raise TypingError("Can't unify return type from the "
"following types: %s"
% ", ".join(sorted(map(str, rettypes))))
return unified
else:
return types.none
def get_state_token(self):
"""The algorithm is monotonic. It can only grow or "refine" the
typevar map.
"""
return [tv.type for name, tv in sorted(self.typevars.items())]
def constrain_statement(self, inst):
if isinstance(inst, ir.Assign):
self.typeof_assign(inst)
elif isinstance(inst, ir.SetItem):
self.typeof_setitem(inst)
elif isinstance(inst, ir.DelItem):
self.typeof_delitem(inst)
elif isinstance(inst, ir.SetAttr):
self.typeof_setattr(inst)
elif isinstance(inst, (ir.Jump, ir.Branch, ir.Return, ir.Del)):
pass
elif isinstance(inst, ir.Raise):
pass
else:
raise NotImplementedError(inst)
def typeof_setitem(self, inst):
constraint = SetItemConstraint(target=inst.target, index=inst.index,
value=inst.value, loc=inst.loc)
self.constraints.append(constraint)
self.setitemcalls.append(inst)
def typeof_delitem(self, inst):
constraint = DelItemConstraint(target=inst.target, index=inst.index,
loc=inst.loc)
self.constraints.append(constraint)
self.delitemcalls.append(inst)
def typeof_setattr(self, inst):
constraint = SetAttrConstraint(target=inst.target, attr=inst.attr,
value=inst.value, loc=inst.loc)
self.constraints.append(constraint)
self.setattrcalls.append(inst)
def typeof_assign(self, inst):
value = inst.value
if isinstance(value, ir.Const):
self.typeof_const(inst, inst.target, value.value)
elif isinstance(value, ir.Var):
self.constraints.append(Propagate(dst=inst.target.name,
src=value.name, loc=inst.loc))
elif isinstance(value, (ir.Global, ir.FreeVar)):
self.typeof_global(inst, inst.target, value)
elif isinstance(value, ir.Arg):
self.typeof_arg(inst, inst.target, value)
elif isinstance(value, ir.Expr):
self.typeof_expr(inst, inst.target, value)
elif isinstance(value, ir.Yield):
self.typeof_yield(inst, inst.target, value)
else:
raise NotImplementedError(type(value), str(value))
def resolve_value_type(self, inst, val):
"""
Resolve the type of a simple Python value, such as can be
represented by literals.
"""
ty = self.context.resolve_value_type(val)
if ty is None:
msg = "Unsupported Python value %r" % (val,)
raise TypingError(msg, loc=inst.loc)
else:
return ty
def typeof_arg(self, inst, target, arg):
src_name = self._mangle_arg_name(arg.name)
self.constraints.append(Propagate(dst=target.name,
src=src_name,
loc=inst.loc))
def typeof_const(self, inst, target, const):
self.lock_type(target.name, self.resolve_value_type(inst, const))
def typeof_yield(self, inst, target, yield_):
# Sending values into generators isn't supported.
self.add_type(target.name, types.none)
def sentry_modified_builtin(self, inst, gvar):
"""Ensure that builtins are modified.
"""
if (gvar.name in ('range', 'xrange') and
gvar.value not in utils.RANGE_ITER_OBJECTS):
bad = True
elif gvar.name == 'slice' and gvar.value is not slice:
bad = True
elif gvar.name == 'len' and gvar.value is not len:
bad = True
else:
bad = False
if bad:
raise TypingError("Modified builtin '%s'" % gvar.name,
loc=inst.loc)
def typeof_global(self, inst, target, gvar):
typ = self.context.resolve_value_type(gvar.value)
if isinstance(typ, types.Array):
# Global array in nopython mode is constant
# XXX why layout='C'?
typ = typ.copy(layout='C', readonly=True)
if typ is not None:
self.sentry_modified_builtin(inst, gvar)
self.lock_type(target.name, typ)
self.assumed_immutables.add(inst)
else:
raise TypingError("Untyped global name '%s'" % gvar.name,
loc=inst.loc)
def typeof_expr(self, inst, target, expr):
if expr.op == 'call':
if isinstance(expr.func, ir.Intrinsic):
restype = expr.func.type.return_type
self.add_type(target.name, restype)
self.usercalls.append((inst.value, expr.args, expr.kws, None))
else:
self.typeof_call(inst, target, expr)
elif expr.op in ('getiter', 'iternext'):
self.typeof_intrinsic_call(inst, target, expr.op, expr.value)
elif expr.op == 'exhaust_iter':
constraint = ExhaustIterConstraint(target.name, count=expr.count,
iterator=expr.value,
loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == 'pair_first':
constraint = PairFirstConstraint(target.name, pair=expr.value,
loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == 'pair_second':
constraint = PairSecondConstraint(target.name, pair=expr.value,
loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == 'binop':
self.typeof_intrinsic_call(inst, target, expr.fn, expr.lhs, expr.rhs)
elif expr.op == 'inplace_binop':
self.typeof_intrinsic_call(inst, target, expr.fn,
expr.lhs, expr.rhs)
elif expr.op == 'unary':
self.typeof_intrinsic_call(inst, target, expr.fn, expr.value)
elif expr.op == 'static_getitem':
constraint = StaticGetItemConstraint(target.name, value=expr.value,
index=expr.index,
loc=expr.loc)
self.constraints.append(constraint)
elif expr.op == 'getitem':
self.typeof_intrinsic_call(inst, target, expr.op, expr.value,
expr.index)
elif expr.op == 'getattr':
constraint = GetAttrConstraint(target.name, attr=expr.attr,
value=expr.value, loc=inst.loc,
inst=inst)
self.constraints.append(constraint)
elif expr.op == 'build_tuple':
constraint = BuildTupleConstraint(target.name, items=expr.items,
loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == 'build_list':
constraint = BuildListConstraint(target.name, items=expr.items,
loc=inst.loc)
self.constraints.append(constraint)
elif expr.op == 'cast':
self.constraints.append(Propagate(dst=target.name,
src=expr.value.name,
loc=inst.loc))
else:
raise NotImplementedError(type(expr), expr)
def typeof_call(self, inst, target, call):
constraint = CallConstraint(target.name, call.func.name, call.args,
call.kws, call.vararg, loc=inst.loc)
self.constraints.append(constraint)
self.usercalls.append((inst.value, call.args, call.kws, call.vararg))
def typeof_intrinsic_call(self, inst, target, func, *args):
constraint = IntrinsicCallConstraint(target.name, func, args,
kws=(), vararg=None, loc=inst.loc)
self.constraints.append(constraint)
self.intrcalls.append((inst.value, constraint))
class NullDebug(object):
def propagate_started(self):
pass
def propagate_finished(self):
pass
def unify_finished(self, typdict, retty, fntys):
pass
class TypeInferDebug(object):
def __init__(self, typeinfer):
self.typeinfer = typeinfer
def _dump_state(self):
print('---- type variables ----')
pprint([v for k, v in sorted(self.typeinfer.typevars.items())])
def propagate_started(self):
print("propagate".center(80, '-'))
def propagate_finished(self):
self._dump_state()
def unify_finished(self, typdict, retty, fntys):
print("Variable types".center(80, "-"))
pprint(typdict)
print("Return type".center(80, "-"))
pprint(retty)
print("Call types".center(80, "-"))
pprint(fntys)
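# Hedged illustrative sketch (independent of numba internals): the propagate() loop
# above is a classic monotone fixed-point iteration -- keep re-running all constraints
# until a state token (here, a plain dict of inferred "type sets") stops changing.
def _fixed_point(constraints, state):
    old = None
    while state != old:
        old = dict(state)
        for constraint in constraints:
            constraint(state)
    return state

# Toy constraints: 'b' copies from 'a', 'c' is the union of 'a' and 'b'.
_constraints = [
    lambda s: s.__setitem__('b', s['a']),
    lambda s: s.__setitem__('c', s['a'] | s['b']),
]
_result = _fixed_point(_constraints, {'a': {'int'}, 'b': set(), 'c': set()})
assert _result == {'a': {'int'}, 'b': {'int'}, 'c': {'int'}}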
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.appengine_admin_v1.services.applications.client import (
ApplicationsClient,
)
from google.cloud.appengine_admin_v1.services.applications.async_client import (
ApplicationsAsyncClient,
)
from google.cloud.appengine_admin_v1.services.authorized_certificates.client import (
AuthorizedCertificatesClient,
)
from google.cloud.appengine_admin_v1.services.authorized_certificates.async_client import (
AuthorizedCertificatesAsyncClient,
)
from google.cloud.appengine_admin_v1.services.authorized_domains.client import (
AuthorizedDomainsClient,
)
from google.cloud.appengine_admin_v1.services.authorized_domains.async_client import (
AuthorizedDomainsAsyncClient,
)
from google.cloud.appengine_admin_v1.services.domain_mappings.client import (
DomainMappingsClient,
)
from google.cloud.appengine_admin_v1.services.domain_mappings.async_client import (
DomainMappingsAsyncClient,
)
from google.cloud.appengine_admin_v1.services.firewall.client import FirewallClient
from google.cloud.appengine_admin_v1.services.firewall.async_client import (
FirewallAsyncClient,
)
from google.cloud.appengine_admin_v1.services.instances.client import InstancesClient
from google.cloud.appengine_admin_v1.services.instances.async_client import (
InstancesAsyncClient,
)
from google.cloud.appengine_admin_v1.services.services.client import ServicesClient
from google.cloud.appengine_admin_v1.services.services.async_client import (
ServicesAsyncClient,
)
from google.cloud.appengine_admin_v1.services.versions.client import VersionsClient
from google.cloud.appengine_admin_v1.services.versions.async_client import (
VersionsAsyncClient,
)
from google.cloud.appengine_admin_v1.types.app_yaml import ApiConfigHandler
from google.cloud.appengine_admin_v1.types.app_yaml import ApiEndpointHandler
from google.cloud.appengine_admin_v1.types.app_yaml import ErrorHandler
from google.cloud.appengine_admin_v1.types.app_yaml import HealthCheck
from google.cloud.appengine_admin_v1.types.app_yaml import Library
from google.cloud.appengine_admin_v1.types.app_yaml import LivenessCheck
from google.cloud.appengine_admin_v1.types.app_yaml import ReadinessCheck
from google.cloud.appengine_admin_v1.types.app_yaml import ScriptHandler
from google.cloud.appengine_admin_v1.types.app_yaml import StaticFilesHandler
from google.cloud.appengine_admin_v1.types.app_yaml import UrlMap
from google.cloud.appengine_admin_v1.types.app_yaml import AuthFailAction
from google.cloud.appengine_admin_v1.types.app_yaml import LoginRequirement
from google.cloud.appengine_admin_v1.types.app_yaml import SecurityLevel
from google.cloud.appengine_admin_v1.types.appengine import (
BatchUpdateIngressRulesRequest,
)
from google.cloud.appengine_admin_v1.types.appengine import (
BatchUpdateIngressRulesResponse,
)
from google.cloud.appengine_admin_v1.types.appengine import CreateApplicationRequest
from google.cloud.appengine_admin_v1.types.appengine import (
CreateAuthorizedCertificateRequest,
)
from google.cloud.appengine_admin_v1.types.appengine import CreateDomainMappingRequest
from google.cloud.appengine_admin_v1.types.appengine import CreateIngressRuleRequest
from google.cloud.appengine_admin_v1.types.appengine import CreateVersionRequest
from google.cloud.appengine_admin_v1.types.appengine import DebugInstanceRequest
from google.cloud.appengine_admin_v1.types.appengine import (
DeleteAuthorizedCertificateRequest,
)
from google.cloud.appengine_admin_v1.types.appengine import DeleteDomainMappingRequest
from google.cloud.appengine_admin_v1.types.appengine import DeleteIngressRuleRequest
from google.cloud.appengine_admin_v1.types.appengine import DeleteInstanceRequest
from google.cloud.appengine_admin_v1.types.appengine import DeleteServiceRequest
from google.cloud.appengine_admin_v1.types.appengine import DeleteVersionRequest
from google.cloud.appengine_admin_v1.types.appengine import GetApplicationRequest
from google.cloud.appengine_admin_v1.types.appengine import (
GetAuthorizedCertificateRequest,
)
from google.cloud.appengine_admin_v1.types.appengine import GetDomainMappingRequest
from google.cloud.appengine_admin_v1.types.appengine import GetIngressRuleRequest
from google.cloud.appengine_admin_v1.types.appengine import GetInstanceRequest
from google.cloud.appengine_admin_v1.types.appengine import GetServiceRequest
from google.cloud.appengine_admin_v1.types.appengine import GetVersionRequest
from google.cloud.appengine_admin_v1.types.appengine import (
ListAuthorizedCertificatesRequest,
)
from google.cloud.appengine_admin_v1.types.appengine import (
ListAuthorizedCertificatesResponse,
)
from google.cloud.appengine_admin_v1.types.appengine import ListAuthorizedDomainsRequest
from google.cloud.appengine_admin_v1.types.appengine import (
ListAuthorizedDomainsResponse,
)
from google.cloud.appengine_admin_v1.types.appengine import ListDomainMappingsRequest
from google.cloud.appengine_admin_v1.types.appengine import ListDomainMappingsResponse
from google.cloud.appengine_admin_v1.types.appengine import ListIngressRulesRequest
from google.cloud.appengine_admin_v1.types.appengine import ListIngressRulesResponse
from google.cloud.appengine_admin_v1.types.appengine import ListInstancesRequest
from google.cloud.appengine_admin_v1.types.appengine import ListInstancesResponse
from google.cloud.appengine_admin_v1.types.appengine import ListServicesRequest
from google.cloud.appengine_admin_v1.types.appengine import ListServicesResponse
from google.cloud.appengine_admin_v1.types.appengine import ListVersionsRequest
from google.cloud.appengine_admin_v1.types.appengine import ListVersionsResponse
from google.cloud.appengine_admin_v1.types.appengine import RepairApplicationRequest
from google.cloud.appengine_admin_v1.types.appengine import UpdateApplicationRequest
from google.cloud.appengine_admin_v1.types.appengine import (
UpdateAuthorizedCertificateRequest,
)
from google.cloud.appengine_admin_v1.types.appengine import UpdateDomainMappingRequest
from google.cloud.appengine_admin_v1.types.appengine import UpdateIngressRuleRequest
from google.cloud.appengine_admin_v1.types.appengine import UpdateServiceRequest
from google.cloud.appengine_admin_v1.types.appengine import UpdateVersionRequest
from google.cloud.appengine_admin_v1.types.appengine import AuthorizedCertificateView
from google.cloud.appengine_admin_v1.types.appengine import DomainOverrideStrategy
from google.cloud.appengine_admin_v1.types.appengine import VersionView
from google.cloud.appengine_admin_v1.types.application import Application
from google.cloud.appengine_admin_v1.types.application import UrlDispatchRule
from google.cloud.appengine_admin_v1.types.audit_data import AuditData
from google.cloud.appengine_admin_v1.types.audit_data import CreateVersionMethod
from google.cloud.appengine_admin_v1.types.audit_data import UpdateServiceMethod
from google.cloud.appengine_admin_v1.types.certificate import AuthorizedCertificate
from google.cloud.appengine_admin_v1.types.certificate import CertificateRawData
from google.cloud.appengine_admin_v1.types.certificate import ManagedCertificate
from google.cloud.appengine_admin_v1.types.certificate import ManagementStatus
from google.cloud.appengine_admin_v1.types.deploy import CloudBuildOptions
from google.cloud.appengine_admin_v1.types.deploy import ContainerInfo
from google.cloud.appengine_admin_v1.types.deploy import Deployment
from google.cloud.appengine_admin_v1.types.deploy import FileInfo
from google.cloud.appengine_admin_v1.types.deploy import ZipInfo
from google.cloud.appengine_admin_v1.types.domain import AuthorizedDomain
from google.cloud.appengine_admin_v1.types.domain_mapping import DomainMapping
from google.cloud.appengine_admin_v1.types.domain_mapping import ResourceRecord
from google.cloud.appengine_admin_v1.types.domain_mapping import SslSettings
from google.cloud.appengine_admin_v1.types.firewall import FirewallRule
from google.cloud.appengine_admin_v1.types.instance import Instance
from google.cloud.appengine_admin_v1.types.location import LocationMetadata
from google.cloud.appengine_admin_v1.types.network_settings import NetworkSettings
from google.cloud.appengine_admin_v1.types.operation import CreateVersionMetadataV1
from google.cloud.appengine_admin_v1.types.operation import OperationMetadataV1
from google.cloud.appengine_admin_v1.types.service import Service
from google.cloud.appengine_admin_v1.types.service import TrafficSplit
from google.cloud.appengine_admin_v1.types.version import AutomaticScaling
from google.cloud.appengine_admin_v1.types.version import BasicScaling
from google.cloud.appengine_admin_v1.types.version import CpuUtilization
from google.cloud.appengine_admin_v1.types.version import DiskUtilization
from google.cloud.appengine_admin_v1.types.version import EndpointsApiService
from google.cloud.appengine_admin_v1.types.version import Entrypoint
from google.cloud.appengine_admin_v1.types.version import ManualScaling
from google.cloud.appengine_admin_v1.types.version import Network
from google.cloud.appengine_admin_v1.types.version import NetworkUtilization
from google.cloud.appengine_admin_v1.types.version import RequestUtilization
from google.cloud.appengine_admin_v1.types.version import Resources
from google.cloud.appengine_admin_v1.types.version import StandardSchedulerSettings
from google.cloud.appengine_admin_v1.types.version import Version
from google.cloud.appengine_admin_v1.types.version import Volume
from google.cloud.appengine_admin_v1.types.version import VpcAccessConnector
from google.cloud.appengine_admin_v1.types.version import InboundServiceType
from google.cloud.appengine_admin_v1.types.version import ServingStatus
__all__ = (
"ApplicationsClient",
"ApplicationsAsyncClient",
"AuthorizedCertificatesClient",
"AuthorizedCertificatesAsyncClient",
"AuthorizedDomainsClient",
"AuthorizedDomainsAsyncClient",
"DomainMappingsClient",
"DomainMappingsAsyncClient",
"FirewallClient",
"FirewallAsyncClient",
"InstancesClient",
"InstancesAsyncClient",
"ServicesClient",
"ServicesAsyncClient",
"VersionsClient",
"VersionsAsyncClient",
"ApiConfigHandler",
"ApiEndpointHandler",
"ErrorHandler",
"HealthCheck",
"Library",
"LivenessCheck",
"ReadinessCheck",
"ScriptHandler",
"StaticFilesHandler",
"UrlMap",
"AuthFailAction",
"LoginRequirement",
"SecurityLevel",
"BatchUpdateIngressRulesRequest",
"BatchUpdateIngressRulesResponse",
"CreateApplicationRequest",
"CreateAuthorizedCertificateRequest",
"CreateDomainMappingRequest",
"CreateIngressRuleRequest",
"CreateVersionRequest",
"DebugInstanceRequest",
"DeleteAuthorizedCertificateRequest",
"DeleteDomainMappingRequest",
"DeleteIngressRuleRequest",
"DeleteInstanceRequest",
"DeleteServiceRequest",
"DeleteVersionRequest",
"GetApplicationRequest",
"GetAuthorizedCertificateRequest",
"GetDomainMappingRequest",
"GetIngressRuleRequest",
"GetInstanceRequest",
"GetServiceRequest",
"GetVersionRequest",
"ListAuthorizedCertificatesRequest",
"ListAuthorizedCertificatesResponse",
"ListAuthorizedDomainsRequest",
"ListAuthorizedDomainsResponse",
"ListDomainMappingsRequest",
"ListDomainMappingsResponse",
"ListIngressRulesRequest",
"ListIngressRulesResponse",
"ListInstancesRequest",
"ListInstancesResponse",
"ListServicesRequest",
"ListServicesResponse",
"ListVersionsRequest",
"ListVersionsResponse",
"RepairApplicationRequest",
"UpdateApplicationRequest",
"UpdateAuthorizedCertificateRequest",
"UpdateDomainMappingRequest",
"UpdateIngressRuleRequest",
"UpdateServiceRequest",
"UpdateVersionRequest",
"AuthorizedCertificateView",
"DomainOverrideStrategy",
"VersionView",
"Application",
"UrlDispatchRule",
"AuditData",
"CreateVersionMethod",
"UpdateServiceMethod",
"AuthorizedCertificate",
"CertificateRawData",
"ManagedCertificate",
"ManagementStatus",
"CloudBuildOptions",
"ContainerInfo",
"Deployment",
"FileInfo",
"ZipInfo",
"AuthorizedDomain",
"DomainMapping",
"ResourceRecord",
"SslSettings",
"FirewallRule",
"Instance",
"LocationMetadata",
"NetworkSettings",
"CreateVersionMetadataV1",
"OperationMetadataV1",
"Service",
"TrafficSplit",
"AutomaticScaling",
"BasicScaling",
"CpuUtilization",
"DiskUtilization",
"EndpointsApiService",
"Entrypoint",
"ManualScaling",
"Network",
"NetworkUtilization",
"RequestUtilization",
"Resources",
"StandardSchedulerSettings",
"Version",
"Volume",
"VpcAccessConnector",
"InboundServiceType",
"ServingStatus",
)
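# Hedged usage sketch: with Application Default Credentials configured, the generated
# clients re-exported above are used like any GAPIC client.  The app id and the exact
# request construction are illustrative assumptions; ApplicationsClient and
# GetApplicationRequest are already imported at the top of this module.
def _example_get_application(app_id="my-app"):
    client = ApplicationsClient()                          # assumed: default credentials
    request = GetApplicationRequest(name="apps/%s" % app_id)
    return client.get_application(request=request)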
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Student's t distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"StudentT",
"StudentTWithAbsDfSoftplusScale",
]
@tf_export("distributions.StudentT")
class StudentT(distribution.Distribution):
"""Student's t-distribution.
This distribution has parameters: degree of freedom `df`, location `loc`,
and `scale`.
#### Mathematical details
The probability density function (pdf) is,
```none
pdf(x; df, mu, sigma) = (1 + y**2 / df)**(-0.5 (df + 1)) / Z
where,
y = (x - mu) / sigma
Z = abs(sigma) sqrt(df pi) Gamma(0.5 df) / Gamma(0.5 (df + 1))
```
where:
* `loc = mu`,
* `scale = sigma`, and,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The StudentT distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ StudentT(df, loc=0, scale=1)
Y = loc + scale * X
```
Notice that `scale` has semantics more similar to standard deviation than
variance. However it is not actually the std. deviation; the Student's
t-distribution std. dev. is `scale sqrt(df / (df - 2))` when `df > 2`.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
Examples of initialization of one or a batch of distributions.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Define a single scalar Student t distribution.
single_dist = tfd.StudentT(df=3)
# Evaluate the pdf at 1, returning a scalar Tensor.
single_dist.prob(1.)
# Define a batch of two scalar valued Student t's.
# The first has degrees of freedom 2, mean 1, and scale 11.
# The second 3, 2 and 22.
multi_dist = tfd.StudentT(df=[2, 3], loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
multi_dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
multi_dist.sample(3)
```
Arguments are broadcast when possible.
```python
# Define a batch of two Student's t distributions.
# Both have df 2 and mean 1, but different scales.
dist = tfd.StudentT(df=2, loc=1, scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
Compute the gradients of samples w.r.t. the parameters:
```python
df = tf.constant(2.0)
loc = tf.constant(2.0)
scale = tf.constant(11.0)
dist = tfd.StudentT(df=df, loc=loc, scale=scale)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [df, loc, scale])
```
"""
def __init__(self,
df,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="StudentT"):
"""Construct Student's t distributions.
The distributions have degree of freedom `df`, mean `loc`, and scale
`scale`.
The parameters `df`, `loc`, and `scale` must be shaped in a way that
supports broadcasting (e.g. `df + loc + scale` is a valid operation).
Args:
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values.
loc: Floating-point `Tensor`. The mean(s) of the distribution(s).
scale: Floating-point `Tensor`. The scaling factor(s) for the
distribution(s). Note that `scale` is not technically the standard
deviation of this distribution but has semantics more similar to
standard deviation than variance.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[df, loc, scale]) as name:
with ops.control_dependencies([check_ops.assert_positive(df)]
if validate_args else []):
self._df = array_ops.identity(df, name="df")
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype(
(self._df, self._loc, self._scale))
super(StudentT, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._df, self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("df", "loc", "scale"), (
[ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 3)))
@property
def df(self):
"""Degrees of freedom in these Student's t distribution(s)."""
return self._df
@property
def loc(self):
"""Locations of these Student's t distribution(s)."""
return self._loc
@property
def scale(self):
"""Scaling factors of these Student's t distribution(s)."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.df),
array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale)))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
array_ops.broadcast_static_shape(self.df.get_shape(),
self.loc.get_shape()),
self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=math_ops.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# The sampling method comes from the fact that if:
# X ~ Normal(0, 1)
# Z ~ Chi2(df)
# Y = X / sqrt(Z / df)
# then:
# Y ~ StudentT(df).
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
df = self.df * array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
gamma_sample = random_ops.random_gamma(
[n],
0.5 * df,
beta=0.5,
dtype=self.dtype,
seed=distribution_util.gen_new_seed(seed, salt="student_t"))
samples = normal_sample * math_ops.rsqrt(gamma_sample / df)
return samples * self.scale + self.loc # Abs(scale) not wanted.
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_unnormalized_prob(self, x):
y = (x - self.loc) / self.scale # Abs(scale) superfluous.
return -0.5 * (self.df + 1.) * math_ops.log1p(y**2. / self.df)
def _log_normalization(self):
return (math_ops.log(math_ops.abs(self.scale)) +
0.5 * math_ops.log(self.df) +
0.5 * np.log(np.pi) +
math_ops.lgamma(0.5 * self.df) -
math_ops.lgamma(0.5 * (self.df + 1.)))
def _cdf(self, x):
# Take Abs(scale) to make subsequent where work correctly.
y = (x - self.loc) / math_ops.abs(self.scale)
x_t = self.df / (y**2. + self.df)
neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
return array_ops.where(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)
def _entropy(self):
v = array_ops.ones(self.batch_shape_tensor(),
dtype=self.dtype)[..., array_ops.newaxis]
u = v * self.df[..., array_ops.newaxis]
beta_arg = array_ops.concat([u, v], -1) / 2.
return (math_ops.log(math_ops.abs(self.scale)) +
0.5 * math_ops.log(self.df) +
special_math_ops.lbeta(beta_arg) +
0.5 * (self.df + 1.) *
(math_ops.digamma(0.5 * (self.df + 1.)) -
math_ops.digamma(0.5 * self.df)))
@distribution_util.AppendDocstring(
"""The mean of Student's T equals `loc` if `df > 1`, otherwise it is
`NaN`. If `self.allow_nan_stats=True`, then an exception will be raised
rather than returning `NaN`.""")
def _mean(self):
mean = self.loc * array_ops.ones(self.batch_shape_tensor(),
dtype=self.dtype)
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where(
math_ops.greater(
self.df,
array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
mean,
array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies(
[
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.df,
message="mean not defined for components of df <= 1"),
],
mean)
@distribution_util.AppendDocstring("""
The variance for Student's T equals
```
scale**2 * df / (df - 2), when df > 2
infinity, when 1 < df <= 2
NaN, when df <= 1
```
""")
def _variance(self):
# We need to put the tf.where inside the outer tf.where to ensure we never
# hit a NaN in the gradient.
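# With df <= 2 the naive df / (df - 2) divides by a non-positive number; even
# though the outer where discards that branch's value, its gradient would
# still propagate NaN, so a safe denominator of 1 is substituted first.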
denom = array_ops.where(math_ops.greater(self.df, 2.),
self.df - 2.,
array_ops.ones_like(self.df))
# Abs(scale) superfluous.
var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) *
math_ops.square(self.scale) * self.df / denom)
# When 1 < df <= 2, variance is infinite.
inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
result_where_defined = array_ops.where(
self.df > array_ops.fill(self.batch_shape_tensor(), 2.),
var,
array_ops.fill(self.batch_shape_tensor(), inf, name="inf"))
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
return array_ops.where(
math_ops.greater(
self.df,
array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
result_where_defined,
array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
else:
return control_flow_ops.with_dependencies(
[
check_ops.assert_less(
array_ops.ones([], dtype=self.dtype),
self.df,
message="variance not defined for components of df <= 1"),
],
result_where_defined)
def _mode(self):
return array_ops.identity(self.loc)
class StudentTWithAbsDfSoftplusScale(StudentT):
"""StudentT with `df = floor(abs(df))` and `scale = softplus(scale)`."""
def __init__(self,
df,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="StudentTWithAbsDfSoftplusScale"):
parameters = dict(locals())
with ops.name_scope(name, values=[df, scale]) as name:
super(StudentTWithAbsDfSoftplusScale, self).__init__(
df=math_ops.floor(math_ops.abs(df)),
loc=loc,
scale=nn.softplus(scale, name="softplus_scale"),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=name)
self._parameters = parameters
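# --- A minimal usage sketch (added for illustration; not part of the original
# module). It assumes a TF1-style runtime where graphs are executed with
# tf.Session; the import of the top-level tensorflow package is an assumption
# about the environment this file ships in.
if __name__ == "__main__":
  import tensorflow as tf
  # Scalar batch: 3 degrees of freedom, standard location/scale.
  dist = StudentT(df=3., loc=0., scale=1.)
  with tf.Session() as sess:
    print(sess.run(dist.sample(5)))      # five draws
    print(sess.run(dist.log_prob(0.)))   # log-density at the mode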
|
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the SegWit changeover logic."""
from test_framework.test_framework import TrollcoinTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, ToHex, FromHex
from test_framework.address import script_to_p2sh, key_to_p2pkh
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE
from io import BytesIO
NODE_0 = 0
NODE_1 = 1
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
# Create a scriptPubKey corresponding to either a P2WPKH output for the
# given pubkey, or a P2WSH output of a 1-of-1 multisig for the given
# pubkey. Returns the hex encoding of the scriptPubKey.
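# For reference (BIP141): a P2WPKH scriptPubKey is OP_0 <20-byte HASH160(pubkey)>
# and a P2WSH scriptPubKey is OP_0 <32-byte SHA256(witness script)>.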
def witness_script(use_p2wsh, pubkey):
if (use_p2wsh == False):
# P2WPKH instead
pubkeyhash = hash160(hex_str_to_bytes(pubkey))
pkscript = CScript([OP_0, pubkeyhash])
else:
# 1-of-1 multisig
witness_program = CScript([OP_1, hex_str_to_bytes(pubkey), OP_1, OP_CHECKMULTISIG])
scripthash = sha256(witness_program)
pkscript = CScript([OP_0, scripthash])
return bytes_to_hex_str(pkscript)
# Return a transaction (in hex) that spends the given utxo to a segwit output,
# optionally wrapping the segwit output using P2SH.
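# The returned transaction is unsigned and has a single output, so the
# difference between the utxo's value and `amount` is left as the fee.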
def create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount):
pkscript = hex_str_to_bytes(witness_script(use_p2wsh, pubkey))
if (encode_p2sh):
p2sh_hash = hash160(pkscript)
pkscript = CScript([OP_HASH160, p2sh_hash, OP_EQUAL])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), b""))
tx.vout.append(CTxOut(int(amount*COIN), pkscript))
return ToHex(tx)
# Create a transaction spending a given utxo to a segwit output corresponding
# to the given pubkey: use_p2wsh determines whether to use P2WPKH or P2WSH;
# encode_p2sh determines whether to wrap in P2SH.
# sign=True will have the given node sign the transaction.
# insert_redeem_script will be added to the scriptSig, if given.
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
tx_to_witness = create_witnessprogram(use_p2wsh, utxo, pubkey, encode_p2sh, amount)
if (sign):
signed = node.signrawtransaction(tx_to_witness)
assert("errors" not in signed or len(["errors"]) == 0)
return node.sendrawtransaction(signed["hex"])
else:
if (insert_redeem_script):
tx = FromHex(CTransaction(), tx_to_witness)
tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)])
tx_to_witness = ToHex(tx)
return node.sendrawtransaction(tx_to_witness)
def getutxo(txid):
utxo = {}
utxo["vout"] = 0
utxo["txid"] = txid
return utxo
def find_unspent(node, min_value):
for utxo in node.listunspent():
if utxo['amount'] >= min_value:
return utxo
class SegWitTest(TrollcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-walletprematurewitness", "-rpcserialversion=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 1)
connect_nodes(self.nodes[0], 2)
self.is_network_split = False
self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
assert_raises_jsonrpc(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
def fail_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
assert_raises_jsonrpc(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1)
sync_blocks(self.nodes)
def run_test(self):
self.nodes[0].generate(161) #block 161
self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].validateaddress(newaddress)["pubkey"])
multiaddress = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]])
self.nodes[i].addwitnessaddress(newaddress)
self.nodes[i].addwitnessaddress(multiaddress)
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
self.log.info("Verify default node can't accept any witness format txs before fork")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
# signed
self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True)
self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True)
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
# TODO: An old node would see these txs without witnesses and be able to mine them
self.log.info("Verify unsigned bare witness txs in versionbits-setting blocks are valid before the fork")
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][1], False) #block 428
self.success_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][1], False) #block 429
self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
self.log.info("Verify unsigned p2sh witness txs with a redeem script in versionbits-settings blocks are valid before the fork")
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][1], False, witness_script(False, self.pubkey[2])) #block 430
self.success_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][1], False, witness_script(True, self.pubkey[2])) #block 431
self.log.info("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
self.log.info("Verify witness txs without witness data are invalid after the fork")
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2]))
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2]))
self.log.info("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 433
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 435
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 436
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
assert(tmpl['weightlimit'] == 4000000)
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
self.log.info("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
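# Rationale: a template built without segwit support must exclude any
# transaction carrying witness data (tx2) and, transitively, anything that
# depends on it (tx3); tx1 spends a non-segwit input, so it can still be
# included.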
txid1 = send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
hex_tx = self.nodes[0].gettransaction(txid1)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert(tx.wit.is_null()) # This should not be a segwit input
assert(txid1 in self.nodes[0].getrawmempool())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransaction(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert(not tx.wit.is_null())
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert(tx.wit.is_null())
assert(txid3 in self.nodes[0].getrawmempool())
# Now try calling getblocktemplate() without segwit support.
template = self.nodes[0].getblocktemplate()
# Check that tx1 is the only transaction of the 3 in the template.
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid2 not in template_txids and txid3 not in template_txids)
assert(txid1 in template_txids)
# Check that running with segwit support results in all 3 being included.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid1 in template_txids)
assert(txid2 in template_txids)
assert(txid3 in template_txids)
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True))
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]]))
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# witness with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
# Multisig without private keys is not seen after addmultisigaddress, but is seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Bare and P2SH multisig with uncompressed keys and no private keys are not seen after addmultisigaddress, but are seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# witness with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
try:
self.nodes[0].importaddress(i,"",False,True)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "The wallet already contains the private key for this address or script")
assert_equal(exp.error["code"], -4)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used or the address is
# not in the wallet
# note that no witness address should be returned by unsolvable addresses
# the multisig_without_privkey_address will fail because its keys were not added with importpubkey
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address + [multisig_without_privkey_address]:
assert_raises_jsonrpc(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
spendable_after_addwitnessaddress = [] # These outputs should be seen after addwitnessaddress
solvable_after_addwitnessaddress = [] # These outputs should be seen after addwitnessaddress but not spendable
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
premature_witaddress.append(script_to_p2sh(p2wpkh))
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
# P2WSH and P2SH(P2WSH) multisig without private keys are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH and P2SH(P2WPKH) with compressed keys are seen after addwitnessaddress
solvable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
premature_witaddress.append(script_to_p2sh(p2wpkh))
self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress + [compressed_solvable_address[1]]:
# This will raise an exception
assert_raises_jsonrpc(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].validateaddress(compressed_solvable_address[1])
self.nodes[0].importaddress(v['hex'],"",False,True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
def mine_and_test_listunspent(self, script_list, ismine):
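# ismine semantics: 2 -> every script must appear in listunspent as spendable,
# 1 -> watch-only (seen but not spendable), 0 -> not seen at all.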
utxo = find_unspent(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
txid = self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if (i['spendable'] == True):
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
def p2sh_address_to_script(self,v):
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])
def p2pkh_address_to_script(self,v):
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
def create_and_mine_tx_from_txids(self, txids, success = True):
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i)
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
if __name__ == '__main__':
SegWitTest().main()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Event']", 'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'})
},
u'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': u"orm['sentry.AlertRelatedGroup']", 'to': u"orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
u'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
u'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"})
},
u'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': u"orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'db_index': 'False'})
},
u'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'unique': 'True'})
},
u'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
u'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': u"orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Team']", 'null': 'True'})
},
u'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': u"orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']", 'null': 'True'}),
'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': u"orm['sentry.User']"})
},
u'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
u'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': u"orm['sentry.TeamMember']", 'to': u"orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': u"orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': u"orm['sentry.User']"})
},
u'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.Project']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Implementations of interoperability test methods."""
import enum
import json
import os
import threading
import time
from oauth2client import client as oauth2client_client
import grpc
from grpc.beta import implementations
from src.proto.grpc.testing import empty_pb2
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2
class TestService(test_pb2.TestServiceServicer):
def EmptyCall(self, request, context):
return empty_pb2.Empty()
def UnaryCall(self, request, context):
if request.HasField('response_status'):
context.set_code(request.response_status.code)
context.set_details(request.response_status.message)
return messages_pb2.SimpleResponse(
payload=messages_pb2.Payload(
type=messages_pb2.COMPRESSABLE,
body=b'\x00' * request.response_size))
def StreamingOutputCall(self, request, context):
if request.HasField('response_status'):
context.set_code(request.response_status.code)
context.set_details(request.response_status.message)
for response_parameters in request.response_parameters:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
type=request.response_type,
body=b'\x00' * response_parameters.size))
def StreamingInputCall(self, request_iterator, context):
aggregate_size = 0
for request in request_iterator:
if request.payload is not None and request.payload.body:
aggregate_size += len(request.payload.body)
return messages_pb2.StreamingInputCallResponse(
aggregated_payload_size=aggregate_size)
def FullDuplexCall(self, request_iterator, context):
for request in request_iterator:
if request.HasField('response_status'):
context.set_code(request.response_status.code)
context.set_details(request.response_status.message)
for response_parameters in request.response_parameters:
yield messages_pb2.StreamingOutputCallResponse(
payload=messages_pb2.Payload(
type=request.payload.type,
body=b'\x00' * response_parameters.size))
# NOTE(nathaniel): Apparently this is the same as the full-duplex call?
# NOTE(atash): It isn't even called in the interop spec (Oct 22 2015)...
def HalfDuplexCall(self, request_iterator, context):
return self.FullDuplexCall(request_iterator, context)
def _large_unary_common_behavior(
stub, fill_username, fill_oauth_scope, call_credentials):
request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE, response_size=314159,
payload=messages_pb2.Payload(body=b'\x00' * 271828),
fill_username=fill_username, fill_oauth_scope=fill_oauth_scope)
response_future = stub.UnaryCall.future(
request, credentials=call_credentials)
response = response_future.result()
if response.payload.type is not messages_pb2.COMPRESSABLE:
raise ValueError(
    'response payload type is "%s"!' % response.payload.type)
elif len(response.payload.body) != 314159:
raise ValueError(
'response body of incorrect size %d!' % len(response.payload.body))
else:
return response
def _empty_unary(stub):
response = stub.EmptyCall(empty_pb2.Empty())
if not isinstance(response, empty_pb2.Empty):
raise TypeError(
    'response is of type "%s", not empty_pb2.Empty!' % type(response))
def _large_unary(stub):
_large_unary_common_behavior(stub, False, False, None)
def _client_streaming(stub):
payload_body_sizes = (27182, 8, 1828, 45904,)
payloads = (
messages_pb2.Payload(body=b'\x00' * size)
for size in payload_body_sizes)
requests = (
messages_pb2.StreamingInputCallRequest(payload=payload)
for payload in payloads)
response = stub.StreamingInputCall(requests)
if response.aggregated_payload_size != 74922:
raise ValueError(
'incorrect size %d!' % response.aggregated_payload_size)
def _server_streaming(stub):
sizes = (31415, 9, 2653, 58979,)
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(
messages_pb2.ResponseParameters(size=sizes[0]),
messages_pb2.ResponseParameters(size=sizes[1]),
messages_pb2.ResponseParameters(size=sizes[2]),
messages_pb2.ResponseParameters(size=sizes[3]),
)
)
response_iterator = stub.StreamingOutputCall(request)
for index, response in enumerate(response_iterator):
if response.payload.type != messages_pb2.COMPRESSABLE:
raise ValueError(
'response body of invalid type %s!' % response.payload.type)
elif len(response.payload.body) != sizes[index]:
raise ValueError(
'response body of invalid size %d!' % len(response.payload.body))
def _cancel_after_begin(stub):
sizes = (27182, 8, 1828, 45904,)
payloads = (messages_pb2.Payload(body=b'\x00' * size) for size in sizes)
requests = (messages_pb2.StreamingInputCallRequest(payload=payload)
for payload in payloads)
response_future = stub.StreamingInputCall.future(requests)
response_future.cancel()
if not response_future.cancelled():
raise ValueError('expected call to be cancelled')
class _Pipe(object):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._open = True
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self._condition:
while not self._values and self._open:
self._condition.wait()
if self._values:
return self._values.pop(0)
else:
raise StopIteration()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify()
def close(self):
with self._condition:
self._open = False
self._condition.notify()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
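# Illustrative sketch (not part of the interop suite and never called by it):
# _Pipe is a thread-safe blocking iterator, so a caller can feed requests into
# a full-duplex RPC one at a time while reading responses in lock step, which
# is exactly how _ping_pong below drives the call.
def _example_pipe_usage(stub):
  # `stub` is assumed to expose FullDuplexCall like the stubs passed to the
  # test functions in this module.
  with _Pipe() as pipe:
    response_iterator = stub.FullDuplexCall(pipe)
    pipe.add(messages_pb2.StreamingOutputCallRequest(
        response_type=messages_pb2.COMPRESSABLE,
        response_parameters=(messages_pb2.ResponseParameters(size=1),),
        payload=messages_pb2.Payload(body=b'\x00')))
    return next(response_iterator)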
def _ping_pong(stub):
request_response_sizes = (31415, 9, 2653, 58979,)
request_payload_sizes = (27182, 8, 1828, 45904,)
with _Pipe() as pipe:
response_iterator = stub.FullDuplexCall(pipe)
for response_size, payload_size in zip(
request_response_sizes, request_payload_sizes):
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(
messages_pb2.ResponseParameters(size=response_size),),
payload=messages_pb2.Payload(body=b'\x00' * payload_size))
pipe.add(request)
response = next(response_iterator)
if response.payload.type != messages_pb2.COMPRESSABLE:
raise ValueError(
'response body of invalid type %s!' % response.payload.type)
if len(response.payload.body) != response_size:
raise ValueError(
'response body of invalid size %d!' % len(response.payload.body))
def _cancel_after_first_response(stub):
request_response_sizes = (31415, 9, 2653, 58979,)
request_payload_sizes = (27182, 8, 1828, 45904,)
with _Pipe() as pipe:
response_iterator = stub.FullDuplexCall(pipe)
response_size = request_response_sizes[0]
payload_size = request_payload_sizes[0]
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(
messages_pb2.ResponseParameters(size=response_size),),
payload=messages_pb2.Payload(body=b'\x00' * payload_size))
pipe.add(request)
response = next(response_iterator)
# We test the contents of `response` in the Ping Pong test - don't check
# them here.
response_iterator.cancel()
try:
next(response_iterator)
except grpc.RpcError as rpc_error:
if rpc_error.code() is not grpc.StatusCode.CANCELLED:
raise
else:
raise ValueError('expected call to be cancelled')
def _timeout_on_sleeping_server(stub):
request_payload_size = 27182
with _Pipe() as pipe:
response_iterator = stub.FullDuplexCall(pipe, timeout=0.001)
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
payload=messages_pb2.Payload(body=b'\x00' * request_payload_size))
pipe.add(request)
time.sleep(0.1)
try:
next(response_iterator)
except grpc.RpcError as rpc_error:
if rpc_error.code() is not grpc.StatusCode.DEADLINE_EXCEEDED:
raise
else:
raise ValueError('expected call to exceed deadline')
def _empty_stream(stub):
with _Pipe() as pipe:
response_iterator = stub.FullDuplexCall(pipe)
pipe.close()
try:
next(response_iterator)
raise ValueError('expected exactly 0 responses')
except StopIteration:
pass
def _status_code_and_message(stub):
message = 'test status message'
code = 2
status = grpc.StatusCode.UNKNOWN # code = 2
request = messages_pb2.SimpleRequest(
response_type=messages_pb2.COMPRESSABLE,
response_size=1,
payload=messages_pb2.Payload(body=b'\x00'),
response_status=messages_pb2.EchoStatus(code=code, message=message)
)
response_future = stub.UnaryCall.future(request)
if response_future.code() != status:
raise ValueError(
'expected code %s, got %s' % (status, response_future.code()))
elif response_future.details() != message:
raise ValueError(
'expected message %s, got %s' % (message, response_future.details()))
request = messages_pb2.StreamingOutputCallRequest(
response_type=messages_pb2.COMPRESSABLE,
response_parameters=(
messages_pb2.ResponseParameters(size=1),),
response_status=messages_pb2.EchoStatus(code=code, message=message))
response_iterator = stub.StreamingOutputCall(request)
if response_iterator.code() != status:
raise ValueError(
'expected code %s, got %s' % (status, response_iterator.code()))
elif response_iterator.details() != message:
raise ValueError(
'expected message %s, got %s' % (message, response_iterator.details()))
def _compute_engine_creds(stub, args):
response = _large_unary_common_behavior(stub, True, True, None)
if args.default_service_account != response.username:
raise ValueError(
'expected username %s, got %s' % (
args.default_service_account, response.username))
def _oauth2_auth_token(stub, args):
json_key_filename = os.environ[
oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
response = _large_unary_common_behavior(stub, True, True, None)
if wanted_email != response.username:
raise ValueError(
'expected username %s, got %s' % (wanted_email, response.username))
if args.oauth_scope.find(response.oauth_scope) == -1:
raise ValueError(
'expected to find oauth scope "{}" in received "{}"'.format(
response.oauth_scope, args.oauth_scope))
def _jwt_token_creds(stub, args):
json_key_filename = os.environ[
oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
response = _large_unary_common_behavior(stub, True, False, None)
if wanted_email != response.username:
raise ValueError(
'expected username %s, got %s' % (wanted_email, response.username))
def _per_rpc_creds(stub, args):
json_key_filename = os.environ[
oauth2client_client.GOOGLE_APPLICATION_CREDENTIALS]
wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
credentials = oauth2client_client.GoogleCredentials.get_application_default()
scoped_credentials = credentials.create_scoped([args.oauth_scope])
# TODO(https://github.com/grpc/grpc/issues/6799): Eliminate this last
# remaining use of the Beta API.
call_credentials = implementations.google_call_credentials(
scoped_credentials)
response = _large_unary_common_behavior(stub, True, False, call_credentials)
if wanted_email != response.username:
raise ValueError(
'expected username %s, got %s' % (wanted_email, response.username))
@enum.unique
class TestCase(enum.Enum):
EMPTY_UNARY = 'empty_unary'
LARGE_UNARY = 'large_unary'
SERVER_STREAMING = 'server_streaming'
CLIENT_STREAMING = 'client_streaming'
PING_PONG = 'ping_pong'
CANCEL_AFTER_BEGIN = 'cancel_after_begin'
CANCEL_AFTER_FIRST_RESPONSE = 'cancel_after_first_response'
EMPTY_STREAM = 'empty_stream'
STATUS_CODE_AND_MESSAGE = 'status_code_and_message'
COMPUTE_ENGINE_CREDS = 'compute_engine_creds'
OAUTH2_AUTH_TOKEN = 'oauth2_auth_token'
JWT_TOKEN_CREDS = 'jwt_token_creds'
PER_RPC_CREDS = 'per_rpc_creds'
TIMEOUT_ON_SLEEPING_SERVER = 'timeout_on_sleeping_server'
def test_interoperability(self, stub, args):
if self is TestCase.EMPTY_UNARY:
_empty_unary(stub)
elif self is TestCase.LARGE_UNARY:
_large_unary(stub)
elif self is TestCase.SERVER_STREAMING:
_server_streaming(stub)
elif self is TestCase.CLIENT_STREAMING:
_client_streaming(stub)
elif self is TestCase.PING_PONG:
_ping_pong(stub)
elif self is TestCase.CANCEL_AFTER_BEGIN:
_cancel_after_begin(stub)
elif self is TestCase.CANCEL_AFTER_FIRST_RESPONSE:
_cancel_after_first_response(stub)
elif self is TestCase.TIMEOUT_ON_SLEEPING_SERVER:
_timeout_on_sleeping_server(stub)
elif self is TestCase.EMPTY_STREAM:
_empty_stream(stub)
elif self is TestCase.STATUS_CODE_AND_MESSAGE:
_status_code_and_message(stub)
elif self is TestCase.COMPUTE_ENGINE_CREDS:
_compute_engine_creds(stub, args)
elif self is TestCase.OAUTH2_AUTH_TOKEN:
_oauth2_auth_token(stub, args)
elif self is TestCase.JWT_TOKEN_CREDS:
_jwt_token_creds(stub, args)
elif self is TestCase.PER_RPC_CREDS:
_per_rpc_creds(stub, args)
else:
raise NotImplementedError('Test case "%s" not implemented!' % self.name)
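# Illustrative sketch (assumption: `stub` and `args` come from the interop
# client's command-line setup, which lives outside this module): each TestCase
# member simply dispatches to one of the helper functions defined above.
def _example_run_case(case_name, stub, args):
  # e.g. _example_run_case('ping_pong', stub, args)
  TestCase(case_name).test_interoperability(stub, args)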
|
|
import numpy as np
import nibabel as nib
import numpy.linalg as npl
from dipy.io.dpy import Dpy
def flirt2aff(mat, in_img, ref_img):
""" Transform from `in_img` voxels to `ref_img` voxels given `matfile`
Parameters
----------
mat : (4,4) array
contents (as array) of output ``-omat`` transformation file from flirt
in_img : img
image passed (as filename) to flirt as ``-in`` image
ref_img : img
image passed (as filename) to flirt as ``-ref`` image
Returns
-------
aff : (4,4) array
Transform from voxel coordinates in ``in_img`` to voxel coordinates in
``ref_img``
"""
in_hdr = in_img.header
ref_hdr = ref_img.header
# get_zooms gets the positive voxel sizes as returned in the header
in_zoomer = np.diag(in_hdr.get_zooms() + (1,))
ref_zoomer = np.diag(ref_hdr.get_zooms() + (1,))
# The in_img voxels to ref_img voxels as recorded in the current affines
current_in2ref = np.dot(ref_img.affine, in_img.affine)
if npl.det(current_in2ref) < 0:
raise ValueError('Negative determinant to current affine mapping - bailing out')
return np.dot(npl.inv(ref_zoomer), np.dot(mat, in_zoomer))
def flirt2aff_files(matfile, in_fname, ref_fname):
""" Map from `in_fname` image voxels to `ref_fname` voxels given `matfile`
Parameters
----------
matfile : str
filename of output ``-omat`` transformation file from flirt
in_fname : str
filename for image passed to flirt as ``-in`` image
ref_fname : str
filename for image passed to flirt as ``-ref`` image
Returns
-------
aff : (4,4) array
Transform from voxel coordinates in image for ``in_fname`` to voxel
coordinates in image for ``ref_fname``
"""
mat = np.loadtxt(matfile)
in_img = nib.load(in_fname)
ref_img = nib.load(ref_fname)
return flirt2aff(mat, in_img, ref_img)
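# Illustrative sketch (file names are placeholders, not real data): the 4x4
# affine returned above maps N x 3 voxel coordinates of the ``-in`` image to
# voxel coordinates of the ``-ref`` image, which is how ``im2im`` is applied
# to the tracks further down in this script.
def _example_apply_flirt2aff(matfile, in_fname, ref_fname, points):
    aff = flirt2aff_files(matfile, in_fname, ref_fname)
    return np.dot(points, aff[:3, :3].T) + aff[:3, 3]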
#d101='/home/eg309/Data/TEST_MR10032/subj_10/101/'
d101='/home/eg309/Data/PROC_MR10032/subj_10/101/'
ffa=d101+'1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_bet_FA.nii.gz'
fdis=d101+'1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_nonlin_displacements.nii.gz'
ffareg=d101+'1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_bet_FA_reg.nii.gz'
flirtaff=d101+'1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_affine_transf.mat'
ftrack=d101+'1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_QA_native.dpy'
froi='/home/eg309/Data/PROC_MR10032/NIFTI_ROIs/AnatomicalROIs/ROI01_GCC.nii'
froi2='/home/eg309/Data/PROC_MR10032/NIFTI_ROIs/AnatomicalROIs/ROI02_BCC.nii'
#froi3='/home/eg309/Data/PROC_MR10032/NIFTI_ROIs/AnatomicalROIs/ROI03_SCC.nii'
froi3='/home/eg309/Downloads/SCC_analyze.nii'
ref_fname = '/usr/share/fsl/data/standard/FMRIB58_FA_1mm.nii.gz'
dpr=Dpy(ftrack,'r')
print dpr.track_no
T=dpr.read_indexed([0,1,2,3,2000,1000000])
for t in T:
print t.shape
dpr.close()
track=T[4]
im2im = flirt2aff_files(flirtaff, ffa, ref_fname) #ref_fname to be replaced by ffareg
print im2im
from dipy.core.track_metrics import length
print len(track)
print length(track)
#ntrack=np.dot(im2im[:3,:3],track.T)+im2im[:3,[3]]
ntrack=np.dot(track,im2im[:3,:3].T)+im2im[:3,3]
print length(ntrack)
#print length(ntrack.T)
print length(ntrack)/length(track)
#print npl.det(im2im)**(1/3.)
disimg=nib.load(fdis)
ddata=disimg.get_data()
daff=disimg.affine
from scipy.ndimage.interpolation import map_coordinates as mc
di=ddata[:,:,:,0]
dj=ddata[:,:,:,1]
dk=ddata[:,:,:,2]
mci=mc(di,ntrack.T)
mcj=mc(dj,ntrack.T)
mck=mc(dk,ntrack.T)
wtrack=ntrack+np.vstack((mci,mcj,mck)).T
np.set_printoptions(2)
print np.hstack((wtrack,ntrack))
print length(wtrack),length(ntrack),length(track)
imgroi=nib.load(froi)
roidata=imgroi.get_data()
roiaff=imgroi.affine
roiaff=daff
I=np.array(np.where(roidata>0)).T
wI=np.dot(roiaff[:3,:3],I.T).T+roiaff[:3,3]
print wI.shape
wI=wI.astype('f4')
imgroi2=nib.load(froi2)
roidata2=imgroi2.get_data()
roiaff2=imgroi2.affine
roiaff2=daff
I2=np.array(np.where(roidata2>0)).T
wI2=np.dot(roiaff2[:3,:3],I2.T).T+roiaff2[:3,3]
print wI2.shape
wI2=wI2.astype('f4')
imgroi3=nib.load(froi3)
roidata3=imgroi3.get_data()
roiaff3=imgroi3.affine
roiaff3=daff
I3=np.array(np.where(roidata3>0)).T
wI3=np.dot(roiaff3[:3,:3],I3.T).T+roiaff3[:3,3]
print wI3.shape
wI3=wI3.astype('f4')
dpr=Dpy(ftrack,'r')
print dpr.track_no
from time import time
t1=time()
iT=np.random.randint(0,dpr.track_no,10*10**2)
T=dpr.read_indexed(iT)
dpr.close()
t2=time()
print t2-t1,len(T)
Tfinal=[]
'''
for (i,track) in enumerate(T):
print i
ntrack=np.dot(track,im2im[:3,:3].T)+im2im[:3,3]
mci=mc(di,ntrack.T)
mcj=mc(dj,ntrack.T)
mck=mc(dk,ntrack.T)
wtrack=ntrack+np.vstack((mci,mcj,mck)).T
Tfinal.append(np.dot(wtrack,daff[:3,:3].T)+daff[:3,3])
'''
lengths=[len(t) for t in T]
lengths.insert(0,0)
offsets=np.cumsum(lengths)
caboodle=np.concatenate(T,axis=0)
ntrack=np.dot(caboodle,im2im[:3,:3].T)+im2im[:3,3]
mci=mc(di,ntrack.T,order=1)
mcj=mc(dj,ntrack.T,order=1)
mck=mc(dk,ntrack.T,order=1)
wtrack=ntrack+np.vstack((mci,mcj,mck)).T
caboodlew=np.dot(wtrack,daff[:3,:3].T)+daff[:3,3]
#caboodlew=np.dot(wtrack,roiaff[:3,:3].T)+roiaff[:3,3]
Tfinal=[]
for i in range(len(offsets)-1):
s=offsets[i]
e=offsets[i+1]
Tfinal.append(caboodlew[s:e])
#ref_fname = '/usr/share/fsl/data/standard/FMRIB58_FA_1mm.nii.gz'
ref_fname = '/usr/share/fsl/data/standard/FMRIB58_FA-skeleton_1mm.nii.gz'
imgref=nib.load(ref_fname)
refdata=imgref.get_data()
refaff=imgref.affine
'''
refI=np.array(np.where(refdata>5000)).T
wrefI=np.dot(refaff[:3,:3],refI.T).T+refaff[:3,3]
print wrefI.shape
wrefI=wrefI.astype('f4')
'''
from dipy.viz import fos
froi='/home/eg309/Data/ICBM_Wmpm/ICBM_WMPM.nii'
def get_roi(froi,no):
imgroi=nib.load(froi)
roidata=imgroi.get_data()
roiaff=imgroi.affine
I=np.array(np.where(roidata==no)).T
wI=np.dot(roiaff[:3,:3],I.T).T+roiaff[:3,3]
wI=wI.astype('f4')
return wI
from dipy.viz import fos
r=fos.ren()
#fos.add(r,fos.point(wI,fos.blue))
#fos.add(r,fos.point(wI2,fos.yellow))
#fos.add(r,fos.point(wI3,fos.green))
#fos.add(r,fos.point(wrefI,fos.cyan))
#fos.add(r,fos.point(wrefI,fos.yellow))
fos.add(r,fos.point(get_roi(froi,3),fos.blue))
fos.add(r,fos.point(get_roi(froi,4),fos.yellow))
fos.add(r,fos.point(get_roi(froi,5),fos.green))
fos.add(r,fos.line(Tfinal,fos.red))
fos.show(r)
print roiaff
print roiaff2
print roiaff3
print daff
##load roi image
#roiimg=ni.load(froi)
#roidata=roiimg.get_data()
#roiaff=roiimg.affine
#print 'roiaff',roiaff,roidata.shape
#
##load FA image
#faimg=ni.load(ffa)
#data=faimg.get_data()
#aff=faimg.affine
##aff[0,:]=-aff[0,:]
##aff[0,0]=-aff[0,0]
##aff=np.array([[2.5,0,0,-2.5*48],[0,2.5,0,-2.5*39],[0,0,2.5,-2.5*23],[0,0,0,1]])
#
#print 'aff',aff, data.shape
#
##cube = np.array([v for v in np.ndindex(5,5,5)]).T + np.array([[47,47,27]]).T
#cube = np.array([v for v in np.ndindex(data.shape[0],data.shape[1],data.shape[2])]).T
#
##from image space(image coordinates) to native space (world coordinates)
#cube_native = np.dot(aff[:3,:3],cube)+aff[:3,[3]]
##print cube_native.T
#
##load flirt affine
#laff=np.loadtxt(flirtaff)
##laff[0,:]=-laff[0,:]
##laff=np.linalg.inv(laff)
##laff[:3,3]=0
#print 'laff',laff
##print 'inverting laff'
#
#
##from native space(world coordinates) to mni space(world coordinates)
#cube_mni = np.dot(laff[:3,:3],cube_native)+laff[:3,[3]]
##print cube_mni.T
#
#dis=ni.load(fdis)
#disdata=dis.get_data()
#mniaff=dis.affine
#print 'mniaff',mniaff
#
##invert disaff
#mniaffinv= np.linalg.inv(mniaff)
##from mni space(world coordinates) to image mni space (image coordinates)
#cube_mni_grid = np.dot(mniaffinv[:3,:3],cube_mni)+mniaffinv[:3,[3]]
#print cube_mni_grid.shape
#
#cube_mni_grid_nearest=np.round(cube_mni_grid).astype(np.int)
#
#print np.max(cube_mni_grid[0,:])
#print np.max(cube_mni_grid[1,:])
#print np.max(cube_mni_grid[2,:])
#
#print np.max(cube_mni_grid_nearest[0,:])
#print np.max(cube_mni_grid_nearest[1,:])
#print np.max(cube_mni_grid_nearest[2,:])
#
#d0,d1,d2,junk = disdata.shape
#
#cube_mni_grid_nearest[np.where(cube_mni_grid_nearest<0)]=0
#cube_mni_grid_nearest[np.where(cube_mni_grid_nearest>181)]=0
#
#n0=cube_mni_grid_nearest[0,:]
#n1=cube_mni_grid_nearest[1,:]
#n2=cube_mni_grid_nearest[2,:]
'''
n0 = np.min(np.max(cube_mni_grid_nearest[0,:],0),d0)
n1 = np.min(np.max(cube_mni_grid_nearest[1,:],0),d1)
n2 = np.min(np.max(cube_mni_grid_nearest[2,:],0),d2)
'''
#cube_mni_data=np.zeros(disdata.shape[:-1],dtype=np.float32)
#cube_mni_data[n0,n1,n2]=1
'''
D=disdata[n0,n1,n2]
'''
#from dipy.viz import fos
#r=fos.ren()
##fos.add(r,fos.point(cube.T,fos.red))
##fos.add(r,fos.point(cube_native.T,fos.yellow))
#fos.add(r,fos.point(cube_mni.T,fos.green))
#fos.add(r,fos.sphere(np.array([0,0,0]),10))
#
##fos.add(r,fos.point(cube_mni_grid_nearest.T,fos.red))
###fos.add(r,fos.point(cube.T,fos.green))
###fos.add(r,fos.point(cube_mni_grid.T,fos.red))
###fos.add(r,fos.point(cube.T,fos.yellow))
#fos.show(r)
#
#def map_to_index(grid,shape):
# x=grid[0,:]
# y=grid[1,:]
# z=grid[2,:]
# xmin=x.min()
# ymin=y.min()
# zmin=z.min()
# xmax=x.max()
# ymax=y.max()
# zmax=z.max()
# i=(x-xmin)/(xmax-xmin)*shape[0]
# j=(y-ymin)/(ymax-ymin)*shape[1]
# k=(z-zmin)/(zmax-zmin)*shape[2]
# return i,j,k
#
#i,j,k=map_to_index(cube_mni_grid,(182,218,182))
#
#from scipy.ndimage import map_coordinates
#FA_MNI_IMG = map_coordinates(data,np.c_[i, j, k].T)
#from dipy.viz import fos
#r=fos.ren()
#fos.add(r,fos.point(cube_mni.T,fos.blue))
#fos.add(r,fos.point(cube_native.T,fos.green))
#fos.add(r,fos.point(cube_mni_grid.T,fos.red))
#fos.add(r,fos.point(cube.T,fos.yellow))
#fos.show(r)
###corner = cube[:,:].astype(np.int).T
#print corner
###print data[corner[:,0:27],corner[:,0:27],corner[:,0:27]]
#def func(x,y):
# return (x+y)*np.exp(-5.*(x**2+y**2))
#
#def map_to_index(x,y,bounds,N,M):
# xmin,xmax,ymin,ymax=bounds
# i1=(x-xmin)/(xmax-xmin)*N
# i2=(y-ymin)/(ymax-ymin)*M
# return i1,i2
#
#x,y=np.mgrid[-1:1:10j,-1:1:10j]
#fvals=func(x,y)
#
#xn,yn=np.mgrid[-1:1:100j,-1:1:100j]
#i1,i2 = map_to_index(xn,yn,[-1,1,-1,1],*x.shape)
#
#from scipy.ndimage import map_coordinates
#
#fn = map_coordinates(fvals,[i1,i2])
#true = func(xn,yn)
def test_flirt2aff():
from os.path import join as pjoin
from nose.tools import assert_true
import scipy.ndimage as ndi
import nibabel as nib
'''
matfile = pjoin('fa_data',
'1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_affine_transf.mat')
in_fname = pjoin('fa_data',
'1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_bet_FA.nii.gz')
'''
matfile=flirtaff
in_fname = ffa
ref_fname = '/usr/share/fsl/data/standard/FMRIB58_FA_1mm.nii.gz'
res = flirt2aff_files(matfile, in_fname, ref_fname)
mat = np.loadtxt(matfile)
in_img = nib.load(in_fname)
ref_img = nib.load(ref_fname)
assert_true(np.all(res == flirt2aff(mat, in_img, ref_img)))
# mm to mm transform
mm_in2mm_ref = np.dot(ref_img.affine,
np.dot(res, npl.inv(in_img.affine)))
# make new in image thus transformed
in_data = in_img.get_data()
ires = npl.inv(res)
in_data[np.isnan(in_data)] = 0
resliced_data = ndi.affine_transform(in_data,
ires[:3,:3],
ires[:3,3],
ref_img.shape)
resliced_img = nib.Nifti1Image(resliced_data, ref_img.affine)
nib.save(resliced_img, 'test.nii')
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
def slice_arrays(arrays, indices, contiguous=True):
"""Slices batches out of provided arrays (workaround for eager tensors).
Unfortunately eager tensors don't have the same slicing behavior as
Numpy arrays (they follow the same slicing behavior as symbolic TF tensors),
hence we cannot use `generic_utils.slice_arrays` directly
and we have to implement this workaround based on `concat`. This has a
performance cost.
Arguments:
arrays: Single array or list of arrays.
indices: List of indices in the array that should be included in the output
batch.
contiguous: Boolean flag indicating whether the indices are contiguous.
Returns:
Slice of data (either single array or list of arrays).
"""
converted_to_list = False
if not isinstance(arrays, list):
converted_to_list = True
arrays = [arrays]
if any(tensor_util.is_tensor(x) for x in arrays):
if not contiguous:
entries = [[x[i:i + 1] for i in indices] for x in arrays]
slices = [array_ops.concat(x, axis=0) for x in entries]
else:
slices = [x[indices[0]:indices[-1] + 1] for x in arrays]
else:
slices = generic_utils.slice_arrays(arrays, indices)
if converted_to_list:
slices = slices[0]
return slices
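# Minimal sketch (NumPy inputs only, so the eager-tensor `concat` branch above
# is not exercised); it is not called anywhere in this module.
def _example_slice_arrays():
  batch = np.arange(10)
  head = slice_arrays(batch, [2, 3, 4])                        # rows 2..4
  picked = slice_arrays([batch], [1, 5, 7], contiguous=False)  # rows 1, 5, 7
  return head, picked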
def handle_partial_sample_weights(outputs, sample_weights, sample_weight_modes,
check_all_flat=False):
"""Adds 1.0 as sample weights for the outputs for which there is no weight.
Args:
outputs: List of model outputs.
sample_weights: List of sample weight inputs.
sample_weight_modes: List of sample weight modes or None.
check_all_flat: Ensure that inputs are not nested structures. This is not
a free check, so we may not want to run it eagerly every iteration.
Returns:
Tuple of sample weights, one sample weight for every output, and booleans
describing the raw sample weights.
"""
any_sample_weight = sample_weights is not None and any(
w is not None for w in sample_weights)
partial_sample_weight = any_sample_weight and any(
w is None for w in sample_weights)
if not any_sample_weight:
return None, any_sample_weight, partial_sample_weight
if not partial_sample_weight:
return sample_weights, any_sample_weight, partial_sample_weight
if check_all_flat:
nest.assert_same_structure(
list_to_tuple(sample_weights),
list_to_tuple(nest.flatten(sample_weights)))
nest.assert_same_structure(
list_to_tuple(outputs),
list_to_tuple(nest.flatten(outputs)))
if sample_weight_modes is not None:
nest.assert_same_structure(
sample_weight_modes, nest.flatten(sample_weight_modes))
new_sample_weights = []
for i, sw in enumerate(sample_weights):
if sw is None:
as_numpy = isinstance(outputs[i], np.ndarray)
output = outputs[i]
output_shape = output.shape if as_numpy else array_ops.shape(output)
is_temporal = (
sample_weight_modes is not None and
sample_weight_modes[i] == 'temporal')
sw_shape = (output_shape[0],
output_shape[1]) if is_temporal else (output_shape[0],)
new_sample_weights.append(
np.ones(sw_shape) if as_numpy else array_ops.ones(sw_shape))
else:
new_sample_weights.append(sw)
return (list_to_tuple(new_sample_weights),
any_sample_weight, partial_sample_weight)
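# Minimal sketch (NumPy outputs, one weight missing): the helper above fills
# the gap with a vector of ones so that every output has a sample weight.
def _example_handle_partial_sample_weights():
  outputs = [np.zeros((4, 1)), np.zeros((4, 1))]
  weights, any_sw, partial_sw = handle_partial_sample_weights(
      outputs, [np.ones((4,)), None], sample_weight_modes=None)
  # weights[1] is np.ones((4,)); any_sw and partial_sw are both True.
  return weights, any_sw, partial_sw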
class RespectCompiledTrainableState(object):
"""Set and restore trainable state if it has changed since compile.
The keras API guarantees that the value of each Layer's `trainable` property
at `Model.compile` time will be used when training that model. In order to
respect this requirement, it may be necessary to set the trainable value of
layers to their compile time values before beginning a training endpoint and
restore the values before returning from said endpoint. This scope checks if
any layer's trainable state has changed since Model compile, and performs this
set and un-set bookkeeping.
However, the trainable state of a layer changes quite infrequently, if ever,
for many kinds of workflows. Moreover, updating every layer in a model is an
expensive operation. As a result, we will only explicitly set and unset the
trainable state of a model if a trainable value has changed since compile.
"""
def __init__(self, model):
self._model = model
self._current_trainable_state = None
self._compiled_trainable_state = None
self._should_set_trainable = False
def __enter__(self):
self._current_trainable_state = self._model._get_trainable_state() # pylint: disable=protected-access
self._compiled_trainable_state = self._model._compiled_trainable_state # pylint: disable=protected-access
# Check to see if any layer's trainable state has changed since `compile`.
for layer, trainable in self._compiled_trainable_state.items():
if (layer in self._current_trainable_state and
trainable != self._current_trainable_state[layer]):
self._should_set_trainable = True
break
# If so, restore the model to its compiled state.
if self._should_set_trainable:
self._model._set_trainable_state(self._compiled_trainable_state) # pylint: disable=protected-access
def __exit__(self, type_arg, value_arg, traceback_arg):
# If we set the values to their compiled state in __enter__, we need to
# restore the original values before leaving the scope.
if self._should_set_trainable:
self._model._set_trainable_state(self._current_trainable_state) # pylint: disable=protected-access
return False # False values do not suppress exceptions
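# Illustrative sketch (assumes `model` is a compiled Keras model; `x` and `y`
# are training data): the scope is intended to wrap a training endpoint so the
# compile-time `trainable` flags are in effect for the duration of the call.
#
#   with RespectCompiledTrainableState(model):
#     model.fit(x, y)  # runs with compile-time trainable state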
# Allow use of methods not exposed to the user.
# pylint: disable=protected-access
def get_input_shape_and_dtype(layer):
"""Retrieves input shape and input dtype of layer if applicable.
Args:
layer: Layer (or model) instance.
Returns:
Tuple (input_shape, input_dtype). Both could be None if the layer
does not have a defined input shape.
Raises:
ValueError: in case an empty Sequential or Functional model is passed.
"""
def _is_graph_model(layer):
return ((hasattr(layer, '_is_graph_network') and layer._is_graph_network) or
layer.__class__.__name__ == 'Sequential')
# In case of nested models: recover the first layer
# of the deepest model to infer input shape and dtype.
# Subclassed Models may not have been built so can't be checked.
while _is_graph_model(layer):
if not layer.layers:
raise ValueError('An empty Model cannot be used as a Layer.')
layer = layer.layers[0]
if getattr(layer, '_batch_input_shape', None):
return layer._batch_input_shape, layer.dtype
return None, None
# pylint: enable=protected-access
def get_static_batch_size(layer):
"""Gets the static batch size of a Layer.
Arguments:
layer: a `Layer` instance.
Returns:
The static batch size of a Layer.
"""
batch_input_shape, _ = get_input_shape_and_dtype(layer)
if batch_input_shape is not None:
return tensor_shape.Dimension(batch_input_shape[0]).value
return None
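# Minimal sketch (assumes a layer built with a fixed batch size, e.g. an
# InputLayer created with `batch_size=32` and `input_shape=(4,)`): the static
# batch size is simply the first entry of `_batch_input_shape`, so the call
# below would return 32, and None for layers without a known batch size.
#
#   get_static_batch_size(input_layer)  # -> 32 or None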
def list_to_tuple(maybe_list):
"""Datasets will stack the list of tensor, so switch them to tuples."""
if isinstance(maybe_list, list):
return tuple(maybe_list)
return maybe_list
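# Tiny sketch: tf.data stacks a Python list into a single tensor but treats a
# tuple as separate components, hence the conversion above.
#
#   list_to_tuple([features, labels])  # -> (features, labels)
#   list_to_tuple({'a': 1})            # -> {'a': 1}, returned unchanged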
|
|
try:
import unittest2 as unittest
except ImportError:
import unittest
from graphite.render.attime import parseTimeReference, parseATTime, parseTimeOffset, getUnitString
from datetime import datetime, timedelta
from django.utils import timezone
from .base import TestCase
import pytz
import mock
def mockDateTime(year, month, day, hour, minute, second):
class MockedDateTime(datetime):
@classmethod
def now(cls, tzinfo=None):
if tzinfo:
return tzinfo.localize(cls(year, month, day, hour, minute, second))
return cls(year, month, day, hour, minute, second)
return MockedDateTime
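# Sketch of how the factory above is used (it mirrors the @mock.patch
# decorators below): the returned class freezes `datetime.now()` so the attime
# parsing code sees a deterministic "current" time.
#
#   frozen = mockDateTime(2015, 3, 8, 12, 0, 0)
#   frozen.now()          # -> datetime(2015, 3, 8, 12, 0, 0), naive
#   frozen.now(pytz.utc)  # -> 2015-03-08 12:00:00 UTC, timezone-aware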
@mock.patch('graphite.render.attime.datetime', mockDateTime(2015, 3, 8, 12, 0, 0))
class ATTimeTimezoneTests(TestCase):
default_tz = timezone.get_current_timezone()
specified_tz = pytz.timezone("America/Los_Angeles")
MOCK_DATE = specified_tz.localize(datetime(2015, 1, 1, 11, 00))
def test_should_return_absolute_time(self):
time_string = '12:0020150308'
expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
actual_time = parseATTime(time_string)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_should_respect_tz(self):
time_string = '12:0020150308'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_should_return_absolute_time_short(self):
time_string = '9:0020150308'
expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
actual_time = parseATTime(time_string)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_should_respect_tz_short(self):
time_string = '9:0020150308'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_YYYYMMDD(self):
time_string = '20150110'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_midnight(self):
expected_time = self.specified_tz.localize(datetime.strptime("0:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_offset_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("1:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight+1h", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_relative_day_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("0:00_20150309", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_relative_day_and_offset_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("3:00_20150309", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow+3h", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_should_return_current_time(self):
expected_time = self.default_tz.localize(datetime.strptime("12:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now")
self.assertEqual(actual_time, expected_time)
def test_now_should_respect_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("12:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_relative_time_in_alternate_zone(self):
expected_time = self.specified_tz.localize(datetime.strptime("11:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("-1h", self.specified_tz)
self.assertEqual(actual_time.hour, expected_time.hour)
def test_should_handle_dst_boundary(self):
expected_time = self.specified_tz.localize(datetime.strptime("04:00_20150308", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight+3h", self.specified_tz)
self.assertEqual(actual_time, expected_time)
def test_parse_naive_datetime(self):
time_ref = parseATTime(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50), self.specified_tz)
expected = self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_zone_aware_datetime(self):
time_ref = parseATTime(self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50)), self.specified_tz)
expected = self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
@mock.patch('graphite.render.attime.datetime', mockDateTime(2015, 1, 1, 11, 0, 0))
class parseTimeReferenceTest(TestCase):
zone = pytz.utc
MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
def test_parse_empty_return_now(self):
time_ref = parseTimeReference('')
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_None_return_now(self):
time_ref = parseTimeReference(None)
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_random_string_raise_Exception(self):
with self.assertRaises(Exception):
parseTimeReference("random")
def test_parse_now_return_now(self):
time_ref = parseTimeReference("now")
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_colon_raises_ValueError(self):
with self.assertRaises(ValueError):
parseTimeReference(":")
def test_parse_naive_datetime(self):
time_ref = parseTimeReference(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_zone_aware_datetime(self):
time_ref = parseTimeReference(self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50)))
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_return_hour_of_today(self):
time_ref = parseTimeReference("8:50")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_am(self):
time_ref = parseTimeReference("8:50am")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_pm(self):
time_ref = parseTimeReference("8:50pm")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 20, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_only_am(self):
time_ref = parseTimeReference("8am")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 0))
self.assertEquals(time_ref, expected)
def test_parse_hour_only_pm(self):
time_ref = parseTimeReference("10pm")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 22, 0))
self.assertEquals(time_ref, expected)
def test_parse_noon(self):
time_ref = parseTimeReference("noon")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 12, 0))
self.assertEquals(time_ref, expected)
def test_parse_midnight(self):
time_ref = parseTimeReference("midnight")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_teatime(self):
time_ref = parseTimeReference("teatime")
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 16, 0))
self.assertEquals(time_ref, expected)
def test_parse_yesterday(self):
time_ref = parseTimeReference("yesterday")
expected = self.zone.localize(datetime(2014, 12, 31, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_today(self):
time_ref = parseTimeReference("today")
expected = self.zone.localize(datetime(2015, 1, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_tomorrow(self):
time_ref = parseTimeReference("tomorrow")
expected = self.zone.localize(datetime(2015, 1, 2, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YY(self):
time_ref = parseTimeReference("02/25/15")
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YYYY(self):
time_ref = parseTimeReference("02/25/2015")
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_YYYYMMDD(self):
time_ref = parseTimeReference("20140606")
expected = self.zone.localize(datetime(2014, 6, 6, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_onedigits(self):
time_ref = parseTimeReference("january8")
expected = self.zone.localize(datetime(2015, 1, 8, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_twodigits(self):
time_ref = parseTimeReference("january10")
expected = self.zone.localize(datetime(2015, 1, 10, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_threedigits_raise_ValueError(self):
with self.assertRaises(ValueError):
parseTimeReference("january800")
def test_parse_MonthName_without_DayOfMonth_raise_Exception(self):
with self.assertRaises(Exception):
parseTimeReference("january")
def test_parse_monday_return_monday_before_now(self):
time_ref = parseTimeReference("monday")
expected = self.zone.localize(datetime(2014, 12, 29, 0, 0))
self.assertEquals(time_ref, expected)
@mock.patch('graphite.render.attime.datetime', mockDateTime(2010, 3, 30, 00, 0, 0))
class parseTimeReferenceTestBug551771(TestCase):
zone = pytz.utc
def test_parse_MM_slash_DD_slash_YY(self):
time_ref = parseTimeReference("02/23/10")
expected = self.zone.localize(datetime(2010, 2, 23, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_YYYYMMDD(self):
time_ref = parseTimeReference("20100223")
expected = self.zone.localize(datetime(2010, 2, 23, 0, 0))
self.assertEquals(time_ref, expected)
class parseTimeOffsetTest(TestCase):
def test_parse_None_returns_empty_timedelta(self):
time_ref = parseTimeOffset(None)
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_integer_raises_TypeError(self):
with self.assertRaises(TypeError):
parseTimeOffset(1)
def test_parse_string_starting_neither_with_minus_nor_digit_raises_KeyError(self):
with self.assertRaises(KeyError):
parseTimeOffset("Something")
def test_parse_m_as_unit_raises_Exception(self):
with self.assertRaises(Exception):
parseTimeOffset("1m")
def test_parse_digits_only_raises_exception(self):
with self.assertRaises(Exception):
parseTimeOffset("10")
def test_parse_alpha_only_raises_KeyError(self):
with self.assertRaises(KeyError):
parseTimeOffset("month")
def test_parse_minus_only_returns_zero(self):
time_ref = parseTimeOffset("-")
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_plus_only_returns_zero(self):
time_ref = parseTimeOffset("+")
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_ten_days(self):
time_ref = parseTimeOffset("10days")
expected = timedelta(10)
self.assertEquals(time_ref, expected)
def test_parse_zero_days(self):
time_ref = parseTimeOffset("0days")
expected = timedelta(0)
self.assertEquals(time_ref, expected)
def test_parse_minus_ten_days(self):
time_ref = parseTimeOffset("-10days")
expected = timedelta(-10)
self.assertEquals(time_ref, expected)
def test_parse_five_seconds(self):
time_ref = parseTimeOffset("5seconds")
expected = timedelta(seconds=5)
self.assertEquals(time_ref, expected)
def test_parse_five_minutes(self):
time_ref = parseTimeOffset("5minutes")
expected = timedelta(minutes=5)
self.assertEquals(time_ref, expected)
def test_parse_five_hours(self):
time_ref = parseTimeOffset("5hours")
expected = timedelta(hours=5)
self.assertEquals(time_ref, expected)
def test_parse_five_weeks(self):
time_ref = parseTimeOffset("5weeks")
expected = timedelta(weeks=5)
self.assertEquals(time_ref, expected)
def test_parse_one_month_returns_thirty_days(self):
time_ref = parseTimeOffset("1month")
expected = timedelta(30)
self.assertEquals(time_ref, expected)
def test_parse_two_months_returns_sixty_days(self):
time_ref = parseTimeOffset("2months")
expected = timedelta(60)
self.assertEquals(time_ref, expected)
def test_parse_twelve_months_returns_360_days(self):
time_ref = parseTimeOffset("12months")
expected = timedelta(360)
self.assertEquals(time_ref, expected)
def test_parse_one_year_returns_365_days(self):
time_ref = parseTimeOffset("1year")
expected = timedelta(365)
self.assertEquals(time_ref, expected)
def test_parse_two_years_returns_730_days(self):
time_ref = parseTimeOffset("2years")
expected = timedelta(730)
self.assertEquals(time_ref, expected)
class getUnitStringTest(TestCase):
def test_get_seconds(self):
test_cases = ['s', 'se', 'sec', 'second', 'seconds']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'seconds')
def test_get_minutes(self):
test_cases = ['min', 'minute', 'minutes']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'minutes')
def test_get_hours(self):
test_cases = ['h', 'ho', 'hour', 'hours']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'hours')
def test_get_days(self):
test_cases = ['d', 'da', 'day', 'days']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'days')
def test_get_weeks(self):
test_cases = ['w', 'we', 'week', 'weeks']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'weeks')
def test_get_months(self):
test_cases = ['mon', 'month', 'months']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'months')
def test_get_years(self):
test_cases = ['y', 'ye', 'year', 'years']
for test_case in test_cases:
result = getUnitString(test_case)
self.assertEquals(result, 'years')
def test_m_raises_Exception(self):
with self.assertRaises(Exception):
result = getUnitString("m")
def test_integer_raises_Exception(self):
with self.assertRaises(Exception):
result = getUnitString(1)
@mock.patch('graphite.render.attime.datetime', mockDateTime(2016, 2, 29, 00, 0, 0))
class parseATTimeTestLeapYear(TestCase):
zone = pytz.utc
def test_parse_last_year(self):
time_ref = parseATTime("-1year")
expected = self.zone.localize(datetime(2015, 3, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_leap_year(self):
time_ref = parseATTime("-4years")
expected = self.zone.localize(datetime(2012, 3, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_month(self):
time_ref = parseATTime("-1month")
expected = self.zone.localize(datetime(2016, 1, 30, 0, 0))
self.assertEquals(time_ref, expected)
@mock.patch('graphite.render.attime.datetime',mockDateTime(2013, 2, 28, 00, 0, 0))
class parseATTimeTestLeapYear2(TestCase):
zone = pytz.utc
def test_parse_last_year(self):
time_ref = parseATTime("-1year")
expected = self.zone.localize(datetime(2012, 2, 29, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_leap_year(self):
time_ref = parseATTime("-4years")
expected = self.zone.localize(datetime(2009, 3, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_last_month(self):
time_ref = parseATTime("-1month")
expected = self.zone.localize(datetime(2013, 1, 29, 0, 0))
self.assertEquals(time_ref, expected)
class parseATTimeTest(TestCase):
zone = pytz.utc
MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
@unittest.expectedFailure
def test_parse_noon_plus_yesterday(self):
time_ref = parseATTime("noon+yesterday")
expected = datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day - 1, 12, 00)
self.assertEquals(time_ref, expected)
class parseATTimeTestNow(TestCase):
default_tz = timezone.get_current_timezone()
specified_tz = pytz.timezone("America/Los_Angeles")
now = '11:0020171013'
MOCK_DATE = specified_tz.localize(datetime(2015, 1, 1, 11, 00))
def test_should_return_absolute_time(self):
time_string = '12:0020150308'
expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_should_respect_tz(self):
time_string = '12:0020150308'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_should_return_absolute_time_short(self):
time_string = '9:0020150308'
expected_time = self.default_tz.localize(datetime.strptime(time_string,'%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_should_respect_tz_short(self):
time_string = '9:0020150308'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%H:%M%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_absolute_time_YYYYMMDD(self):
time_string = '20150110'
expected_time = self.specified_tz.localize(datetime.strptime(time_string, '%Y%m%d'))
actual_time = parseATTime(time_string, self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_midnight(self):
expected_time = self.specified_tz.localize(datetime.strptime("0:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_offset_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("1:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight+1h", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_relative_day_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("0:00_20171014", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_relative_day_and_offset_with_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("3:00_20171014", '%H:%M_%Y%m%d'))
actual_time = parseATTime("midnight_tomorrow+3h", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_should_return_current_time(self):
expected_time = self.default_tz.localize(datetime.strptime("11:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now", now=self.now)
self.assertEqual(actual_time, expected_time)
def test_now_should_respect_tz(self):
expected_time = self.specified_tz.localize(datetime.strptime("11:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("now", self.specified_tz, now=self.now)
self.assertEqual(actual_time, expected_time)
def test_relative_time_in_alternate_zone(self):
expected_time = self.specified_tz.localize(datetime.strptime("10:00_20171013", '%H:%M_%Y%m%d'))
actual_time = parseATTime("-1h", self.specified_tz, now=self.now)
self.assertEqual(actual_time.hour, expected_time.hour)
def test_parse_naive_datetime(self):
time_ref = parseATTime(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50), self.specified_tz, now=self.now)
expected = self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_zone_aware_datetime(self):
time_ref = parseATTime(self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50)), self.specified_tz, now=self.now)
expected = self.specified_tz.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
class parseTimeReferenceTestNow(TestCase):
zone = pytz.utc
MOCK_DATE = zone.localize(datetime(2015, 1, 1, 11, 00))
now = zone.localize(datetime(2015, 1, 1, 11, 00))
def test_parse_empty_return_now(self):
time_ref = parseTimeReference('', now=self.now)
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_None_return_now(self):
time_ref = parseTimeReference(None, now=self.now)
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_random_string_raise_Exception(self):
with self.assertRaises(Exception):
parseTimeReference("random", now=self.now)
def test_parse_now_return_now(self):
time_ref = parseTimeReference("now", now=self.now)
self.assertEquals(time_ref, self.MOCK_DATE)
def test_parse_colon_raises_ValueError(self):
with self.assertRaises(ValueError):
parseTimeReference(":", now=self.now)
def test_parse_naive_datetime(self):
time_ref = parseTimeReference(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50), now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_zone_aware_datetime(self):
time_ref = parseTimeReference(self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50)), now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_return_hour_of_today(self):
time_ref = parseTimeReference("8:50", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_am(self):
time_ref = parseTimeReference("8:50am", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_pm(self):
time_ref = parseTimeReference("8:50pm", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 20, 50))
self.assertEquals(time_ref, expected)
def test_parse_hour_only_am(self):
time_ref = parseTimeReference("8am", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 8, 0))
self.assertEquals(time_ref, expected)
def test_parse_hour_only_pm(self):
time_ref = parseTimeReference("10pm", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 22, 0))
self.assertEquals(time_ref, expected)
def test_parse_noon(self):
time_ref = parseTimeReference("noon", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 12, 0))
self.assertEquals(time_ref, expected)
def test_parse_midnight(self):
time_ref = parseTimeReference("midnight", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_teatime(self):
time_ref = parseTimeReference("teatime", now=self.now)
expected = self.zone.localize(datetime(self.MOCK_DATE.year, self.MOCK_DATE.month, self.MOCK_DATE.day, 16, 0))
self.assertEquals(time_ref, expected)
def test_parse_yesterday(self):
time_ref = parseTimeReference("yesterday", now=self.now)
expected = self.zone.localize(datetime(2014, 12, 31, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_today(self):
time_ref = parseTimeReference("today", now=self.now)
expected = self.zone.localize(datetime(2015, 1, 1, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_tomorrow(self):
time_ref = parseTimeReference("tomorrow", now=self.now)
expected = self.zone.localize(datetime(2015, 1, 2, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YY(self):
time_ref = parseTimeReference("02/25/15", now=self.now)
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MM_slash_DD_slash_YYYY(self):
time_ref = parseTimeReference("02/25/2015", now=self.now)
expected = self.zone.localize(datetime(2015, 2, 25, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_YYYYMMDD(self):
time_ref = parseTimeReference("20140606", now=self.now)
expected = self.zone.localize(datetime(2014, 6, 6, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_onedigits(self):
time_ref = parseTimeReference("january8", now=self.now)
expected = self.zone.localize(datetime(2015, 1, 8, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_twodigits(self):
time_ref = parseTimeReference("january10", now=self.now)
expected = self.zone.localize(datetime(2015, 1, 10, 0, 0))
self.assertEquals(time_ref, expected)
def test_parse_MonthName_DayOfMonth_threedigits_raise_ValueError(self):
with self.assertRaises(ValueError):
parseTimeReference("january800", now=self.now)
def test_parse_MonthName_without_DayOfMonth_raise_Exception(self):
with self.assertRaises(Exception):
parseTimeReference("january", now=self.now)
def test_parse_monday_return_monday_before_now(self):
time_ref = parseTimeReference("monday", now=self.now)
expected = self.zone.localize(datetime(2014, 12, 29, 0, 0))
self.assertEquals(time_ref, expected)
|
|
# -*- coding: utf-8 -*-
import logging
import os
from django.contrib.auth.models import Group, User
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.core.files import File
from django.test import TestCase
from django.utils import timezone
import jobs.presets as presets
from jobs.models import (
ExportConfig, ExportFormat, ExportProfile, Job, Region, Tag
)
logger = logging.getLogger(__name__)
class TestJob(TestCase):
"""
Test cases for Job model
"""
def setUp(self,):
self.path = os.path.dirname(os.path.realpath(__file__))
self.formats = ExportFormat.objects.all() # pre-loaded by 'insert_export_formats' migration
Group.objects.create(name='TestDefaultExportExtentGroup')
self.user = User.objects.create(username='demo', email='[email protected]', password='demo')
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job(name='TestJob',
description='Test description', event='Nepal activation',
user=self.user, the_geom=the_geom)
self.job.save()
self.uid = self.job.uid
# add the formats to the job
self.job.formats = self.formats
self.job.save()
self.tags = [('building', 'yes'), ('place', 'city'), ('highway', 'service'), ('aeroway', 'helipad')]
for tag in self.tags:
tag = Tag.objects.create(
key=tag[0],
value=tag[1],
job=self.job
)
def test_job_creation(self,):
saved_job = Job.objects.all()[0]
self.assertEqual(self.job, saved_job)
self.assertEquals(self.uid, saved_job.uid)
self.assertIsNotNone(saved_job.created_at)
self.assertIsNotNone(saved_job.updated_at)
saved_formats = saved_job.formats.all()
self.assertIsNotNone(saved_formats)
self.assertItemsEqual(saved_formats, self.formats)
tags = saved_job.tags.all()
self.assertEquals(4, len(tags))
self.assertEquals('Test description', saved_job.description)
self.assertEquals(0, saved_job.configs.all().count())
def test_job_creation_with_config(self,):
saved_job = Job.objects.all()[0]
self.assertEqual(self.job, saved_job)
self.assertEquals(self.uid, saved_job.uid)
self.assertIsNotNone(saved_job.created_at)
self.assertIsNotNone(saved_job.updated_at)
saved_formats = saved_job.formats.all()
self.assertIsNotNone(saved_formats)
self.assertItemsEqual(saved_formats, self.formats)
# attach a configuration to a job
f = File(open(self.path + '/files/hdm_presets.xml'))
filename = f.name.split('/')[-1]
config = ExportConfig.objects.create(name='Test Preset Config', filename=filename,
upload=f, config_type='PRESET', user=self.user)
f.close()
self.assertIsNotNone(config)
uid = config.uid
saved_job.configs.add(config)
saved_config = saved_job.configs.all()[0]
self.assertEqual(config, saved_config)
saved_config.delete() # cleanup
def test_spatial_fields(self,):
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12)) # in africa
the_geom = GEOSGeometry(bbox, srid=4326)
the_geog = GEOSGeometry(bbox)
the_geom_webmercator = the_geom.transform(ct=3857, clone=True)
job = Job.objects.all()[0]
self.assertIsNotNone(job)
geom = job.the_geom
geog = job.the_geog
geom_web = job.the_geom_webmercator
self.assertEqual(the_geom, geom)
self.assertEqual(the_geog, geog)
self.assertEqual(the_geom_webmercator, geom_web)
def test_fields(self, ):
job = Job.objects.all()[0]
self.assertEquals('TestJob', job.name)
self.assertEquals('Test description', job.description)
self.assertEquals('Nepal activation', job.event)
self.assertEqual(self.user, job.user)
def test_str(self, ):
job = Job.objects.all()[0]
self.assertEquals(str(job), 'TestJob')
def test_job_region(self, ):
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12)) # africa
region = Region.objects.filter(the_geom__contains=bbox)[0]
self.assertIsNotNone(region)
self.assertEquals('Africa', region.name)
self.job.region = region
self.job.save()
saved_job = Job.objects.all()[0]
self.assertEqual(saved_job.region, region)
def test_overpass_extents(self,):
job = Job.objects.all()[0]
extents = job.overpass_extents
self.assertIsNotNone(extents)
self.assertEquals(4, len(extents.split(',')))
def test_categorised_tags(self, ):
# delete existing tags
self.job.tags.all().delete()
parser = presets.PresetParser(self.path + '/files/hdm_presets.xml')
tags = parser.parse()
self.assertIsNotNone(tags)
self.assertEquals(238, len(tags))
# save all the tags from the preset
for tag_dict in tags:
tag = Tag.objects.create(
key=tag_dict['key'],
value=tag_dict['value'],
job=self.job,
data_model='osm',
geom_types=tag_dict['geom_types']
)
self.assertEquals(238, self.job.tags.all().count())
job = Job.objects.all()[0]
categories = job.categorised_tags
self.assertIsNotNone(categories)
self.assertEquals(24, len(categories['points']))
self.assertEquals(12, len(categories['lines']))
self.assertEquals(22, len(categories['polygons']))
def test_tags(self,):
self.job.tags.all().delete()
parser = presets.PresetParser(self.path + '/files/hdm_presets.xml')
tags = parser.parse()
self.assertIsNotNone(tags)
self.assertEquals(238, len(tags))
# save all the tags from the preset
for tag_dict in tags:
tag = Tag.objects.create(
key=tag_dict['key'],
value=tag_dict['value'],
job=self.job,
data_model='osm',
geom_types=tag_dict['geom_types']
)
self.assertEquals(238, self.job.tags.all().count())
job = Job.objects.all()[0]
filters = job.filters
class TestExportFormat(TestCase):
def test_str(self,):
kml = ExportFormat.objects.get(slug='kml')
self.assertEquals(unicode(kml), 'kml')
self.assertEquals(str(kml), 'KML Format')
class TestRegion(TestCase):
def test_load_region(self,):
ds = DataSource(os.path.dirname(os.path.realpath(__file__)) + '/../migrations/africa.geojson')
layer = ds[0]
geom = layer.get_geoms(geos=True)[0]
the_geom = GEOSGeometry(geom.wkt, srid=4326)
the_geog = GEOSGeometry(geom.wkt)
the_geom_webmercator = the_geom.transform(ct=3857, clone=True)
region = Region.objects.create(name="Africa", description="African export region",
the_geom=the_geom, the_geog=the_geog, the_geom_webmercator=the_geom_webmercator
)
saved_region = Region.objects.get(uid=region.uid)
self.assertEqual(region, saved_region)
def test_africa_region(self, ):
africa = Region.objects.get(name='Africa')
self.assertIsNotNone(africa)
self.assertEquals('Africa', africa.name)
self.assertIsNotNone(africa.the_geom)
def test_bbox_intersects_region(self, ):
bbox = Polygon.from_bbox((-3.9, 16.6, 7.0, 27.6))
self.assertIsNotNone(bbox)
africa = Region.objects.get(name='Africa')
self.assertIsNotNone(africa)
self.assertTrue(africa.the_geom.intersects(bbox))
def test_get_region_for_bbox(self, ):
bbox = Polygon.from_bbox((-3.9, 16.6, 7.0, 27.6))
regions = Region.objects.all()
found = []
for region in regions:
if region.the_geom.intersects(bbox):
found.append(region)
break
self.assertTrue(len(found) == 1)
self.assertEquals('Africa', found[0].name)
class TestJobRegionIntersection(TestCase):
def setUp(self,):
self.formats = ExportFormat.objects.all() # pre-loaded by 'insert_export_formats' migration
Group.objects.create(name='TestDefaultExportExtentGroup')
self.user = User.objects.create(username='demo', email='[email protected]', password='demo')
bbox = Polygon.from_bbox((36.90, 13.54, 48.52, 20.24)) # overlaps africa / central asia
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name='TestJob',
description='Test description', user=self.user,
the_geom=the_geom, feature_save=True, feature_pub=True)
self.uid = self.job.uid
# add the formats to the job
self.job.formats = self.formats
self.job.save()
def test_job_region_intersection(self, ):
job = Job.objects.all()[0]
# use the_geog
started = timezone.now()
regions = Region.objects.filter(the_geog__intersects=job.the_geog).intersection(job.the_geog, field_name='the_geog').order_by('-intersection')
finished = timezone.now()
geog_time = finished - started
# logger.debug('Geography lookup took: %s' % geog_time)
self.assertEquals(2, len(regions))
asia = regions[0]
africa = regions[1]
# logger.debug('Asian Geog intersection area: %s' % asia.intersection.area)
self.assertIsNotNone(asia)
self.assertIsNotNone(africa)
self.assertEquals('Central Asia/Middle East', asia.name)
self.assertEquals('Africa', africa.name)
self.assertTrue(asia.intersection.area > africa.intersection.area)
regions = None
# use the_geom
started = timezone.now()
regions = Region.objects.filter(the_geom__intersects=job.the_geom).intersection(job.the_geom, field_name='the_geom').order_by('-intersection')
finished = timezone.now()
geom_time = finished - started
# logger.debug('Geometry lookup took: %s' % geom_time)
self.assertEquals(2, len(regions))
asia = regions[0]
africa = regions[1]
# logger.debug('Asian Geom intersection area: %s' % asia.intersection.area)
self.assertIsNotNone(asia)
self.assertIsNotNone(africa)
self.assertEquals('Central Asia/Middle East', asia.name)
self.assertEquals('Africa', africa.name)
self.assertTrue(asia.intersection.area > africa.intersection.area)
def test_job_outside_region(self, ):
job = Job.objects.all()[0]
bbox = Polygon.from_bbox((2.74, 47.66, 21.61, 60.24)) # outside any region
the_geom = GEOSGeometry(bbox, srid=4326)
job.the_geom = the_geom
job.save()
regions = Region.objects.filter(the_geom__intersects=job.the_geom).intersection(job.the_geom, field_name='the_geom').order_by('-intersection')
self.assertEquals(0, len(regions))
class TestExportConfig(TestCase):
def setUp(self,):
self.path = os.path.dirname(os.path.realpath(__file__))
Group.objects.create(name='TestDefaultExportExtentGroup')
self.user = User.objects.create(username='demo', email='[email protected]', password='demo')
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name='TestJob',
description='Test description', user=self.user,
the_geom=the_geom)
self.uid = self.job.uid
def test_create_config(self,):
f = open(self.path + '/files/hdm_presets.xml')
test_file = File(f)
filename = test_file.name.split('/')[-1]
name = 'Test Configuration File'
config = ExportConfig.objects.create(name=name, filename=filename, upload=test_file, config_type='PRESET', user=self.user)
test_file.close()
self.assertIsNotNone(config)
uid = config.uid
saved_config = ExportConfig.objects.get(uid=uid)
self.assertEquals('PRESET', saved_config.config_type)
self.assertEquals(name, saved_config.name)
self.assertFalse(saved_config.published)
self.assertIsNotNone(saved_config)
self.assertEqual(config, saved_config)
sf = File(open(os.path.abspath('.') + '/media/export/config/preset/hdm_presets.xml'))
self.assertIsNotNone(sf) # check the file gets created on disk
saved_config.delete() # clean up
sf.close()
def test_add_config_to_job(self,):
f = open(self.path + '/files/hdm_presets.xml')
test_file = File(f)
filename = test_file.name.split('/')[-1]
name = 'Test Configuration File'
config = ExportConfig.objects.create(name=name, filename=filename, upload=test_file, config_type='PRESET', user=self.user)
test_file.close()
self.assertIsNotNone(config)
uid = config.uid
self.job.configs.add(config)
self.assertEquals(1, self.job.configs.all().count())
class TestTag(TestCase):
def setUp(self, ):
self.formats = ExportFormat.objects.all() # pre-loaded by 'insert_export_formats' migration
Group.objects.create(name='TestDefaultExportExtentGroup')
self.user = User.objects.create(username='demo', email='[email protected]', password='demo')
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name='TestJob',
description='Test description', user=self.user,
the_geom=the_geom)
self.uid = self.job.uid
# add the formats to the job
self.job.formats = self.formats
self.job.save()
self.path = os.path.dirname(os.path.realpath(__file__))
def test_create_tags(self,):
tags = [
{
'name': 'Airport Ground',
'key': 'aeroway',
'value': 'aerodrome',
'geom_types': ['node', 'area'],
'groups': ['HOT Presets v2.11', 'Transportation', 'Transportation means', 'Airport']
},
]
for tag_dict in tags:
tag = Tag.objects.create(
key=tag_dict['key'],
value=tag_dict['value'],
job=self.job,
data_model='osm',
geom_types=tag_dict['geom_types'],
groups=tag_dict['groups']
)
saved_tags = Tag.objects.all()
self.assertEquals(saved_tags[0].key, 'aeroway')
geom_types = saved_tags[0].geom_types
self.assertEquals(1, len(saved_tags))
self.assertEqual(['node', 'area'], geom_types)
groups = saved_tags[0].groups
self.assertEquals(4, len(groups))
def test_save_tags_from_preset(self,):
parser = presets.PresetParser(self.path + '/files/hdm_presets.xml')
tags = parser.parse()
self.assertIsNotNone(tags)
self.assertEquals(238, len(tags))
for tag_dict in tags:
tag = Tag.objects.create(
key=tag_dict['key'],
value=tag_dict['value'],
job=self.job,
data_model='osm',
geom_types=tag_dict['geom_types'],
groups=tag_dict['groups']
)
self.assertEquals(238, self.job.tags.all().count())
# check the groups got saved correctly
saved_tag = self.job.tags.filter(value='service')[0]
self.assertIsNotNone(saved_tag)
self.assertEquals(3, len(saved_tag.groups))
def test_get_categorised_tags(self,):
parser = presets.PresetParser(self.path + '/files/hdm_presets.xml')
tags = parser.parse()
self.assertIsNotNone(tags)
self.assertEquals(238, len(tags))
for tag_dict in tags:
tag = Tag.objects.create(
key=tag_dict['key'],
value=tag_dict['value'],
job=self.job,
data_model='osm',
geom_types=tag_dict['geom_types'],
groups=tag_dict['groups']
)
self.assertEquals(238, self.job.tags.all().count())
categorised_tags = self.job.categorised_tags
class TestExportProfile(TestCase):
def setUp(self,):
self.group = Group.objects.create(name='TestDefaultExportExtentGroup')
def test_export_profile(self,):
profile = ExportProfile.objects.create(
name='DefaultExportProfile',
max_extent=2500000,
group=self.group
)
self.assertEqual(self.group.export_profile, profile)
self.assertEquals('DefaultExportProfile', profile.name)
self.assertEquals(2500000, profile.max_extent)
|
|
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import numpy as np
import re
from neon import NervanaObject
from neon.data.datasets import load_babi
from neon.data.text import Text
class QA(NervanaObject):
"""
    A general QA container that takes a Q&A dataset which has already been
    vectorized and creates a data iterator to feed data to training.
"""
def __init__(self, story, query, answer):
self.story, self.query, self.answer = story, query, answer
self.ndata = len(self.story)
self.nbatches = self.ndata/self.be.bsz
self.story_length = self.story.shape[1]
self.query_length = self.query.shape[1]
self.shape = [(self.story_length, 1), (self.query_length, 1)]
def __iter__(self):
"""
Generator that can be used to iterate over this dataset.
Yields:
tuple : the next minibatch of data.
"""
self.batch_index = 0
shuf_idx = self.be.rng.permutation(len(self.story))
self.story = self.story[shuf_idx]
self.query = self.query[shuf_idx]
self.answer = self.answer[shuf_idx]
while self.batch_index < self.nbatches:
batch = slice(self.batch_index*self.be.bsz, (self.batch_index+1)*self.be.bsz)
story_tensor = self.be.array(self.story[batch].T.copy())
query_tensor = self.be.array(self.query[batch].T.copy())
answer_tensor = self.be.array(self.answer[batch].T.copy())
self.batch_index += 1
yield (story_tensor, query_tensor), answer_tensor
def reset(self):
"""
For resetting the starting index of this dataset back to zero.
        Relevant for when one wants to call repeated evaluations on the dataset
        but does not want to wrap around for the last uneven minibatch.
        Not necessary when ndata is divisible by the batch size.
"""
self.batch_index = 0
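# A minimal usage sketch for the QA container above (hedged: assumes a neon
# backend has already been generated, e.g. via neon.backends.gen_backend, and
# uses the vectorized arrays produced by the BABI class defined below):
#   babi = BABI(path='.', task='qa1_single-supporting-fact')
#   train_set = QA(*babi.train)
#   for (s, q), a in train_set:
#       pass  # each is a device tensor with one column per example in the batch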
class BABI(NervanaObject):
"""
This class loads in the Facebook bAbI dataset and vectorizes them into stories,
questions, and answers as described in:
"Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks"
http://arxiv.org/abs/1502.05698
"""
def __init__(self, path='.', task='qa1_single-supporting-fact', subset='en'):
"""
        Load the bAbI dataset, extract the text and read the stories.
        For a particular task, the class will read both train and test files
        and combine the vocabulary.
        Args:
            path (str): Directory in which to store the dataset
            task (str): The particular task to solve (all bAbI tasks are
                        trained and tested separately)
            subset (str): Subset of the dataset to use: {en, en-10k, hn, hn-10k}
"""
print 'Preparing bAbI dataset or extracting from %s' % path
print 'Task is %s/%s' % (subset, task)
self.tasks = [
'qa1_single-supporting-fact',
'qa2_two-supporting-facts',
'qa3_three-supporting-facts',
'qa4_two-arg-relations',
'qa5_three-arg-relations',
'qa6_yes-no-questions',
'qa7_counting',
'qa8_lists-sets',
'qa9_simple-negation',
'qa10_indefinite-knowledge',
'qa11_basic-coreference',
'qa12_conjunction',
'qa13_compound-coreference',
'qa14_time-reasoning',
'qa15_basic-deduction',
'qa16_basic-induction',
'qa17_positional-reasoning',
'qa18_size-reasoning',
'qa19_path-finding',
'qa20_agents-motivations'
]
assert task in self.tasks, "given task is not in the bAbI dataset"
self.train_file, self.test_file = load_babi(path, task)
self.train_parsed = BABI.parse_babi(self.train_file)
self.test_parsed = BABI.parse_babi(self.test_file)
self.compute_statistics()
self.train = self.vectorize_stories(self.train_parsed)
self.test = self.vectorize_stories(self.test_parsed)
@staticmethod
def data_to_list(data):
"""
Clean a block of data and split into lines.
Args:
data (string) : String of bAbI data.
Returns:
list : List of cleaned lines of bAbI data.
"""
split_lines = data.split('\n')[:-1]
return [line.decode('utf-8').strip() for line in split_lines]
@staticmethod
def tokenize(sentence):
"""
Split a sentence into tokens including punctuation.
Args:
sentence (string) : String of sentence to tokenize.
Returns:
list : List of tokens.
"""
        return [x.strip() for x in re.split(r'(\W+)?', sentence) if x.strip()]
@staticmethod
def flatten(data):
"""
Flatten a list of data.
Args:
data (list) : List of list of words.
Returns:
list : A single flattened list of all words.
"""
return reduce(lambda x, y: x + y, data)
@staticmethod
def parse_babi(babi_file):
"""
Parse bAbI data into stories, queries, and answers.
Args:
            babi_file (string) : Path to a file of bAbI data.
Returns:
list of tuples : List of (story, query, answer) words.
"""
babi_data = open(babi_file).read()
lines = BABI.data_to_list(babi_data)
data, story = [], []
for line in lines:
nid, line = line.split(' ', 1)
if int(nid) == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
substory = [x for x in story if x]
data.append((substory, BABI.tokenize(q), a))
story.append('')
else:
sent = BABI.tokenize(line)
story.append(sent)
return [(BABI.flatten(_story), _question, answer) for _story, _question, answer in data]
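    # Illustration of the tab-separated line format handled above (hypothetical
    # two-line snippet, not taken from the dataset):
    #   1 Mary moved to the bathroom.
    #   2 Where is Mary?<TAB>bathroom<TAB>1
    # Numbered statement lines accumulate into the story; a line containing tabs
    # is a (question, answer, supporting fact) triple, and the story resets
    # whenever the line number returns to 1, yielding tuples such as
    # (['Mary', 'moved', ..., '.'], ['Where', 'is', 'Mary', '?'], 'bathroom').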
def words_to_vector(self, words):
"""
Convert a list of words into vector form.
Args:
words (list) : List of words.
Returns:
list : Vectorized list of words.
"""
return [self.word_to_index[w] if w in self.word_to_index
else self.vocab_size - 1 for w in words]
def one_hot_vector(self, answer):
"""
Create one-hot representation of an answer.
Args:
answer (string) : The word answer.
Returns:
list : One-hot representation of answer.
"""
vector = np.zeros(self.vocab_size)
vector[self.word_to_index[answer]] = 1
return vector
def vectorize_stories(self, data):
"""
Convert (story, query, answer) word data into vectors.
Args:
data (tuple) : Tuple of story, query, answer word data.
Returns:
tuple : Tuple of story, query, answer vectors.
"""
s, q, a = [], [], []
for story, query, answer in data:
s.append(self.words_to_vector(story))
q.append(self.words_to_vector(query))
a.append(self.one_hot_vector(answer))
s = Text.pad_sentences(s, self.story_maxlen)
q = Text.pad_sentences(q, self.query_maxlen)
a = np.array(a)
return (s, q, a)
def compute_statistics(self):
"""
Compute vocab, word index, and max length of stories and queries
"""
all_data = self.train_parsed + self.test_parsed
vocab = sorted(reduce(lambda x, y: x | y, (set(s + q + [a]) for s, q, a in all_data)))
self.vocab = vocab
# Reserve 0 for masking via pad_sequences and self.vocab_size - 1 for <UNK> token
self.vocab_size = len(vocab) + 2
self.word_to_index = dict((c, i + 1) for i, c in enumerate(vocab))
self.index_to_word = dict((i + 1, c) for i, c in enumerate(vocab))
self.story_maxlen = max(map(len, (s for s, _, _ in all_data)))
self.query_maxlen = max(map(len, (q for _, q, _ in all_data)))
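# Note on the indexing above: word indices start at 1 because index 0 is
# reserved for masking by the padding step, while index vocab_size - 1 is the
# catch-all slot that words_to_vector assigns to out-of-vocabulary words.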
|
|
ID = "convert"
permission = 0
import simplejson
import urllib2
from os import makedirs
import os.path as os_path
words = ["to", "in"]
currency_path = "commands/Currency/currency_data.txt"
conversion = { "distance":{"m" : 1.0, "ft" : 0.3048, "km" : 1000, "mi" : 1609.344, "in" : 0.0254,
"cm" : 1.0/100, "mm" : 1.0/1000, "nm" : 1.0/(10**(-9)), "yard" : 0.9144},
"area":{},
"volume":{},
"mass" : { "kg" : 1, "lb" : 0.45359237, "oz" : 0.028349523125, "st" : 6.35029318, "t" : 1000, "shtn" : 907.18474,
"longtn": 1016.0469088},
"currency":{},
"time":{"millisec" : 1/1000.0, "sec" : 1, "min" : 60, "h" : 60*60, "day" :24*60*60, "year": 24*60*60*365},
"speed":{},
"pressure":{},
"temperature" : {"c" : 1, "f" : (1/1.8, -32), "k" : (1, -273.15), "r" : (1/1.8, -491.67)},
"compstorage" : { "bit" : 1, "byte" : 8, "Kbit" : 10**3, "Mbit" : 10**6, "Gbit" : 10**9, "Tbit" : 10**12, "Pbit" : 10**15,
"KB" : 8*(10**3), "MB" : 8*(10**6), "GB" : 8*(10**9), "TB" : 8*(10**12), "PB": 8*(10**15),
"KiB" : 8*(2**10),"MiB" : 8*(2**20), "GiB" : 8*(2**30), "TiB" : 8*(2**40), "PiB" : 8*(2**50)}
}
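# How the table above is used (a hedged sketch of the arithmetic in execute()):
# plain entries are "multiply by this factor to reach the group's base unit",
# e.g. 2 km -> 2 * 1000 = 2000 m -> 2000 / 1609.344 ~= 1.24 mi.
# Tuple entries encode affine conversions as (scale, offset), applied as
# base = scale * (value + offset), e.g. 212 f -> (1/1.8) * (212 - 32) = 100 c.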
# We define a dictionary of aliases, i.e. alternative names for units of measure
# Ideas for how it can be improved are welcome!
alias = {"distance" : {"meter" : "m", "feet" : "ft", "mile" :"mi", "inch" : "in"},
"area" : {},
"volume" : {},
"mass" : {"kilogram": "kg", "pound" : "lb", "tonne" : "t", "stone" : "st", "ounce" : "oz"},
"currency" : {},
"time" : {"ms" : "millisec", "millisecond" : "millisec","second" : "sec", "minute" : "min", "hour" : "h"},
"speed" : {},
"pressure" : {},
"temperature" : {"celsius" : "c", "fahrenheit" : "f", "kelvin" : "k", "rankine" : "r"},
"compstorage" : {"byte" : "byte", "bit" : "bit"}
}
# The currency conversion retrieves data from https://openexchangerates.org/
# An App ID is required for using the API from the website.
# Register an account on the website to receive your own App ID, or ask me for one.
# A free account can access the API 1000 times per month
appid = ""
def UpdateRates(appid, path):
try:
dir = "/".join(path.split("/")[:-1])
if not os_path.exists(dir):
print dir
makedirs(dir)
url = "http://openexchangerates.org/api/latest.json?app_id={0}".format(appid)
exrate = urllib2.urlopen(url, timeout = 15)
result = exrate.read()
exrate.close()
data = open(path, "w")
data.write(result)
data.close()
return invertRates(simplejson.loads(result)["rates"])
except Exception as error:
print str(error), "wat"
return None
# We invert the rates so they are stored in the CURRENCY->USD format
# instead of the USD->CURRENCY format returned by the API
def invertRates(rates):
for item in rates:
rates[item] = 1.0/rates[item]
return rates
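# For instance (illustrative numbers only), invertRates({"EUR": 0.8, "GBP": 0.64})
# returns {"EUR": 1.25, "GBP": 1.5625}, i.e. how many USD one unit of each
# currency is worth.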
def read_CurrencyRate(path):
file = open(path, "r")
data = simplejson.loads(file.read())
file.close()
return invertRates(data["rates"])
def updateDoge(BTC_USD_rate):
try:
website = "https://www.coins-e.com/api/v2/market/DOGE_BTC/depth/"
opener = urllib2.urlopen(website, timeout = 15)
content = simplejson.loads(opener.read())
opener.close()
return (float(content["ltp"])*BTC_USD_rate)
except Exception as error:
print "Error appeared", str(error)
return False
def findGroup(item, conv):
for group in conv:
trueCase = get_true_case(item, group, conv)
if trueCase != False:
return True, group, trueCase
#if item in conv[group]:
# return True, group
return False, None, False
def matchGroup(item1, item2, conv):
for group in conv:
trueCase1 = get_true_case(item1, group, conv)
trueCase2 = get_true_case(item2, group, conv)
if trueCase1 != False and trueCase2 != False:
# We have found the correct cases of the strings item1 and item2!
return True, group, trueCase1, trueCase2
# We haven't been successful and have to return placeholder values
return False, None, False, False
def get_true_case(item, group, dataDict):
for key in dataDict[group]:
if item.lower() == key.lower():
return key
return False
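# Example: get_true_case("KM", "distance", conversion) returns "km", while a
# unit that is not in the group returns False; findGroup and matchGroup rely on
# this to locate the group a unit belongs to regardless of the input's case.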
# We check whether the file with the currency exchange rates exists locally.
# If not, we try to download it or fall back to hard-coded data.
# The fall-back data is slightly less accurate and needs to be kept up to date manually.
try:
conversion["currency"] = read_CurrencyRate(currency_path)
alias["currency"] = {"euro" : "eur", "dollar" : "usd", "pound" : "gbp", "yen" : "jpy", "bitcoin" : "btc", "zloty" : "pln"}
dogeRate = updateDoge(conversion["currency"]["BTC"])
if dogeRate != False:
conversion["currency"]["DOGE"] = dogeRate
alias["currency"]["dogecoin"] = "doge"
except Exception as error:
print "ERROR: "+str(error)
result = UpdateRates(appid, currency_path)
    # Downloading the rates can fail for various reasons: an invalid appid or the website being down.
    # If downloading fails, fall back to some placeholder data.
if result == None:
# Euro, US Dollar, British Pound, Japanese Yen, Australian Dollar, Canadian Dollar
conversion["currency"] = {"eur" : 1/0.7266, "usd" : 1.0, "gbp" : 1/0.6139, "jpy" : 1/102.969, "aud" : 1/1.1206, "cad" : 1/1.0579}
alias["currency"] = {"euro" : "EUR", "dollar" : "USD", "pound" : "gbp", "yen" : "jpy"}
else:
conversion["currency"] = result
alias["currency"] = {"euro" : "eur", "dollar" : "usd", "pound" : "gbp", "yen" : "jpy", "bitcoin" : "btc"}
dogeRate = updateDoge(conversion["currency"]["BTC"])
if dogeRate != False:
conversion["currency"]["DOGE"] = dogeRate
alias["currency"]["dogecoin"] = "doge"
def __initialize__(self, Startup):
#if Startup == True:
self.unit_conversion = conversion
entry = self.helper.newHelp(ID)
entry.addDescription("The 'convert' command allows you to convert various units sharing the same base unit into each other.")
entry.addArgument("number", "The amount of your original unit that should be converted.")
entry.addArgument("original unit", "The name of your original unit.")
entry.addArgument("to/in", "Syntactic sugar. Can be left out.", optional = True)
entry.addArgument("target unit", "The name of the unit to which the original unit should be converted.")
entry.rank = 0
self.helper.registerHelp(entry, overwrite = True)
def execute(self, name, params, channel, userdata, rank):
if len(params) == 1 and params[0].lower() == "update" and rank == "@@":
data = UpdateRates(appid, currency_path)
if data == None:
self.sendChatMessage(self.send, channel, "Failed to update the currency exchange rates.")
else:
self.unit_conversion["currency"] = data
dogeRate = updateDoge(conversion["currency"]["BTC"])
if dogeRate != False:
self.unit_conversion["currency"]["DOGE"] = dogeRate
self.sendChatMessage(self.send, channel, "Updated currency exchange rates.")
elif len(params) == 4 and params[2].lower() in words or len(params) == 3 and params[2].lower() not in words:
num = params[0]
unit1 = params[1].lower()
unit2 = len(params) == 4 and params[3].lower() or params[2].lower()
doesMatch, group, alias_unit1, alias_unit2 = matchGroup(unit1, unit2, self.unit_conversion)
# Case 1: unit1 and unit2 are both not aliases/alternative names
# or not contained within the same group
if not doesMatch:
# To avoid long if/else chains, we will do both searches at once
# 1st check: unit1 is alias, unit2 is normal
# 2nd check: unit1 is normal, unit2 is alias
match_alias1, alias_group1, alias_case1 = findGroup(unit1, alias)
match_normal2, norm_group2, norm_case2 = findGroup(unit2, self.unit_conversion)
match_normal1, norm_group1, norm_case1 = findGroup(unit1, self.unit_conversion)
match_alias2, alias_group2, alias_case2 = findGroup(unit2, alias)
if match_alias1 == False and unit1.endswith("s"):
match_alias1, alias_group1, alias_case1 = findGroup(unit1[:-1], alias)
if match_alias1 == True:
unit1 = unit1[:-1]
if match_alias2 == False and unit2.endswith("s"):
match_alias2, alias_group2, alias_case2 = findGroup(unit2[:-1], alias)
if match_alias2 == True:
unit2 = unit2[:-1]
# Case 2.1
# If a match has been found for both searches, but the groups don't match,
# we check the group of the normal unit if the alias is contained
if match_alias1 == True and match_normal2 == True and alias_group1 != norm_group2:
if alias_case1 in alias[norm_group2]:
doesMatch = True
group = norm_group2
unit1 = alias[norm_group2][alias_case1]
unit1 = get_true_case(unit1, norm_group2, self.unit_conversion)
unit2 = norm_case2
elif match_alias2 == True and match_normal1 == True and alias_group2 != norm_group1:
if alias_case2 in alias[norm_group1]:
doesMatch = True
group = norm_group1
unit2 = alias[norm_group1][alias_case2]
unit1 = norm_case1
                    unit2 = get_true_case(unit2, norm_group1, self.unit_conversion)
# Case 3.1: unit1 is an alias, unit2 is not an alias, both are contained in the same group
elif match_alias1 == True and match_normal2 == True and alias_group1 == norm_group2:
print alias, alias_group1, alias_case1
unit1 = alias[alias_group1][alias_case1]
unit1 = get_true_case(unit1, alias_group1, self.unit_conversion)
unit2 = norm_case2
doesMatch = True
group = alias_group1
# Case 3.2: unit1 is not an alias, unit2 is an alias, both are contained in the same group
elif match_alias2 == True and match_normal1 == True and norm_group1 == alias_group2:
unit2 = alias[norm_group1][alias_case2]
unit1 = norm_case1
unit2 = get_true_case(unit2, norm_group1, self.unit_conversion)
doesMatch = True
group = norm_group1
#Case 4: unit1 and unit2 are both aliases, and contained within the same group
# or unit1 and unit2 do not exist within the same group or do not exist at all
else:
doesMatch, group, unit1_case, unit2_case = matchGroup(unit1, unit2, alias)
# At this point, we have traversed the dictionary a few times which is not very good
# Does anybody know a better way to do it?
if doesMatch:
unit1 = alias[group][unit1_case]
unit2 = alias[group][unit2_case]
unit1 = get_true_case(unit1, group, self.unit_conversion)
unit2 = get_true_case(unit2, group, self.unit_conversion)
else:
self.sendChatMessage(self.send, channel, "Incompatible or unknown units")
else:
unit1 = alias_unit1
unit2 = alias_unit2
if doesMatch:
if "." in num:
try:
num = float(num)
except:
self.sendChatMessage(self.send, channel, "Invalid number")
return
else:
if not num.isdigit():
self.sendChatMessage(self.send, channel, "Invalid number")
return
else:
num = int(num)
if not isinstance(self.unit_conversion[group][unit1], tuple):
base = self.unit_conversion[group][unit1] * num
else:
modifier = self.unit_conversion[group][unit1][1]
base = self.unit_conversion[group][unit1][0] * (num+modifier)
if not isinstance(self.unit_conversion[group][unit2], tuple):
fin = (1.0/float(self.unit_conversion[group][unit2]))*base
else:
fin = (1.0/float(self.unit_conversion[group][unit2][0]))*base
modifier = self.unit_conversion[group][unit2][1]
fin -= modifier
self.sendChatMessage(self.send, channel, "{0} {1} = {3} {2}".format(num, unit1, unit2, round(fin, 12)))
elif len(params) > 3 and params[2].lower() not in words or len(params) > 4:
self.sendChatMessage(self.send, channel, "Too many arguments")
else:
self.sendChatMessage(self.send, channel, "Not enough arguments")
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProposalMetaData'
db.create_table('reviews_proposalmetadata', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('proposal', self.gf('django.db.models.fields.related.OneToOneField')(related_name='review_metadata', unique=True, to=orm['proposals.Proposal'])),
('num_comments', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('num_reviews', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('latest_activity_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('latest_comment_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('latest_review_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('score', self.gf('django.db.models.fields.FloatField')(default=0.0)),
))
db.send_create_signal('reviews', ['ProposalMetaData'])
# Adding model 'ProposalVersion'
db.create_table('reviews_proposalversion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('conference', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.Conference'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(max_length=400)),
('abstract', self.gf('django.db.models.fields.TextField')()),
('speaker', self.gf('django.db.models.fields.related.ForeignKey')(related_name='proposalversions', to=orm['speakers.Speaker'])),
('submission_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.utcnow)),
('modified_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('kind', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.SessionKind'])),
('audience_level', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.AudienceLevel'])),
('duration', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.SessionDuration'])),
('track', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['conference.Track'], null=True, blank=True)),
('original', self.gf('django.db.models.fields.related.ForeignKey')(related_name='versions', to=orm['proposals.Proposal'])),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('pub_date', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('reviews', ['ProposalVersion'])
# Adding M2M table for field additional_speakers on 'ProposalVersion'
db.create_table('reviews_proposalversion_additional_speakers', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('proposalversion', models.ForeignKey(orm['reviews.proposalversion'], null=False)),
('speaker', models.ForeignKey(orm['speakers.speaker'], null=False))
))
db.create_unique('reviews_proposalversion_additional_speakers', ['proposalversion_id', 'speaker_id'])
# Adding model 'Review'
db.create_table('reviews_review', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('rating', self.gf('django.db.models.fields.CharField')(max_length=2)),
('summary', self.gf('django.db.models.fields.TextField')()),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('proposal', self.gf('django.db.models.fields.related.ForeignKey')(related_name='reviews', to=orm['proposals.Proposal'])),
('proposal_version', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['reviews.ProposalVersion'], null=True, blank=True)),
))
db.send_create_signal('reviews', ['Review'])
# Adding unique constraint on 'Review', fields ['user', 'proposal']
db.create_unique('reviews_review', ['user_id', 'proposal_id'])
# Adding model 'Comment'
db.create_table('reviews_comment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('content', self.gf('django.db.models.fields.TextField')()),
('pub_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('proposal', self.gf('django.db.models.fields.related.ForeignKey')(related_name='comments', to=orm['proposals.Proposal'])),
('proposal_version', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['reviews.ProposalVersion'], null=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('deleted_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('deleted_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='deleted_comments', null=True, to=orm['auth.User'])),
('deleted_reason', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('reviews', ['Comment'])
def backwards(self, orm):
# Removing unique constraint on 'Review', fields ['user', 'proposal']
db.delete_unique('reviews_review', ['user_id', 'proposal_id'])
# Deleting model 'ProposalMetaData'
db.delete_table('reviews_proposalmetadata')
# Deleting model 'ProposalVersion'
db.delete_table('reviews_proposalversion')
# Removing M2M table for field additional_speakers on 'ProposalVersion'
db.delete_table('reviews_proposalversion_additional_speakers')
# Deleting model 'Review'
db.delete_table('reviews_review')
# Deleting model 'Comment'
db.delete_table('reviews_comment')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'conference.audiencelevel': {
'Meta': {'object_name': 'AudienceLevel'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'conference.conference': {
'Meta': {'object_name': 'Conference'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reviews_active': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'reviews_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'reviews_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'conference.sessionduration': {
'Meta': {'object_name': 'SessionDuration'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'minutes': ('django.db.models.fields.IntegerField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'conference.sessionkind': {
'Meta': {'object_name': 'SessionKind'},
'closed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'conference.track': {
'Meta': {'ordering': "['order']", 'object_name': 'Track'},
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'proposals.proposal': {
'Meta': {'object_name': 'Proposal'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'proposal_participations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['speakers.Speaker']"}),
'audience_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.AudienceLevel']"}),
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
'duration': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionDuration']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionKind']"}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposals'", 'to': "orm['speakers.Speaker']"}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Track']", 'null': 'True', 'blank': 'True'})
},
'reviews.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deleted_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['proposals.Proposal']"}),
'proposal_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reviews.ProposalVersion']", 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'reviews.proposal': {
'Meta': {'object_name': 'Proposal', 'db_table': "'proposals_proposal'", '_ormbases': ['proposals.Proposal'], 'proxy': 'True'}
},
'reviews.proposalmetadata': {
'Meta': {'object_name': 'ProposalMetaData'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_activity_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'latest_comment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'latest_review_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'num_reviews': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'proposal': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'review_metadata'", 'unique': 'True', 'to': "orm['proposals.Proposal']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'reviews.proposalversion': {
'Meta': {'object_name': 'ProposalVersion'},
'abstract': ('django.db.models.fields.TextField', [], {}),
'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'proposalversion_participations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['speakers.Speaker']"}),
'audience_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.AudienceLevel']"}),
'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
'duration': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionDuration']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionKind']"}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'original': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['proposals.Proposal']"}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {}),
'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'proposalversions'", 'to': "orm['speakers.Speaker']"}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Track']", 'null': 'True', 'blank': 'True'})
},
'reviews.review': {
'Meta': {'unique_together': "(('user', 'proposal'),)", 'object_name': 'Review'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'proposal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviews'", 'to': "orm['proposals.Proposal']"}),
'proposal_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reviews.ProposalVersion']", 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'rating': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'speakers.speaker': {
'Meta': {'object_name': 'Speaker'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'speaker_profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['reviews']
|
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import exceptions as cinder_exception
from cinderclient.v1 import client as cinder_client_v1
from cinderclient.v2 import client as cinder_client_v2
import mock
import six.moves.urllib.parse as urlparse
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
def _stub_volume(**kwargs):
volume = {
'display_name': None,
'display_description': None,
"attachments": [],
"availability_zone": "cinder",
"created_at": "2012-09-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
}
volume.update(kwargs)
return volume
def _stub_volume_v2(**kwargs):
volume_v2 = {
'name': None,
'description': None,
"attachments": [],
"availability_zone": "cinderv2",
"created_at": "2013-08-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
}
volume_v2.update(kwargs)
return volume_v2
_image_metadata = {
'kernel_id': 'fake',
'ramdisk_id': 'fake'
}
class FakeHTTPClient(cinder.cinder_client.HTTPClient):
def _cs_request(self, url, method, **kwargs):
# Check that certain things are called correctly
if method in ['GET', 'DELETE']:
assert 'body' not in kwargs
elif method == 'PUT':
assert 'body' in kwargs
# Call the method
args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
kwargs.update(args)
munged_url = url.rsplit('?', 1)[0]
munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
munged_url = munged_url.replace('-', '_')
callback = "%s_%s" % (method.lower(), munged_url)
if not hasattr(self, callback):
raise AssertionError('Called unknown API method: %s %s, '
'expected fakes method name: %s' %
(method, url, callback))
# Note the call
self.callstack.append((method, url, kwargs.get('body', None)))
status, body = getattr(self, callback)(**kwargs)
if hasattr(status, 'items'):
return status, body
else:
return {"status": status}, body
def get_volumes_1234(self, **kw):
volume = {'volume': _stub_volume(id='1234')}
return (200, volume)
def get_volumes_nonexisting(self, **kw):
raise cinder_exception.NotFound(code=404, message='Resource not found')
def get_volumes_5678(self, **kw):
"""Volume with image metadata."""
volume = {'volume': _stub_volume(id='1234',
volume_image_metadata=_image_metadata)
}
return (200, volume)
class FakeHTTPClientV2(cinder.cinder_client.HTTPClient):
def _cs_request(self, url, method, **kwargs):
# Check that certain things are called correctly
if method in ['GET', 'DELETE']:
assert 'body' not in kwargs
elif method == 'PUT':
assert 'body' in kwargs
# Call the method
args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
kwargs.update(args)
munged_url = url.rsplit('?', 1)[0]
munged_url = munged_url.strip('/').replace('/', '_').replace('.', '_')
munged_url = munged_url.replace('-', '_')
callback = "%s_%s" % (method.lower(), munged_url)
if not hasattr(self, callback):
raise AssertionError('Called unknown API method: %s %s, '
'expected fakes method name: %s' %
(method, url, callback))
# Note the call
self.callstack.append((method, url, kwargs.get('body', None)))
status, body = getattr(self, callback)(**kwargs)
if hasattr(status, 'items'):
return status, body
else:
return {"status": status}, body
def get_volumes_1234(self, **kw):
volume = {'volume': _stub_volume_v2(id='1234')}
return (200, volume)
def get_volumes_nonexisting(self, **kw):
raise cinder_exception.NotFound(code=404, message='Resource not found')
def get_volumes_5678(self, **kw):
"""Volume with image metadata."""
volume = {'volume': _stub_volume_v2(
id='1234',
volume_image_metadata=_image_metadata)
}
return (200, volume)
class FakeCinderClient(cinder_client_v1.Client):
def __init__(self, username, password, project_id=None, auth_url=None,
insecure=False, retries=None, cacert=None, timeout=None):
super(FakeCinderClient, self).__init__(username, password,
project_id=project_id,
auth_url=auth_url,
insecure=insecure,
retries=retries,
cacert=cacert,
timeout=timeout)
self.client = FakeHTTPClient(username, password, project_id, auth_url,
insecure=insecure, retries=retries,
cacert=cacert, timeout=timeout)
# keep a ref to the clients callstack for factory's assert_called
self.callstack = self.client.callstack = []
class FakeCinderClientV2(cinder_client_v2.Client):
def __init__(self, username, password, project_id=None, auth_url=None,
insecure=False, retries=None, cacert=None, timeout=None):
super(FakeCinderClientV2, self).__init__(username, password,
project_id=project_id,
auth_url=auth_url,
insecure=insecure,
retries=retries,
cacert=cacert,
timeout=timeout)
self.client = FakeHTTPClientV2(username, password, project_id,
auth_url, insecure=insecure,
retries=retries, cacert=cacert,
timeout=timeout)
# keep a ref to the clients callstack for factory's assert_called
self.callstack = self.client.callstack = []
class FakeClientFactory(object):
"""Keep a ref to the FakeClient since volume.api.cinder throws it away."""
def __call__(self, *args, **kwargs):
self.client = FakeCinderClient(*args, **kwargs)
return self.client
def assert_called(self, method, url, body=None, pos=-1):
        expected = (method, url)
        # Check for an empty callstack before indexing into it, so the intended
        # assertion message surfaces instead of an IndexError.
        assert self.client.callstack, ("Expected %s %s but no calls "
                                       "were made." % expected)
        called = self.client.callstack[pos][0:2]
assert expected == called, 'Expected %s %s; got %s %s' % (expected +
called)
if body is not None:
assert self.client.callstack[pos][2] == body
class FakeClientV2Factory(object):
"""Keep a ref to the FakeClient since volume.api.cinder throws it away."""
def __call__(self, *args, **kwargs):
self.client = FakeCinderClientV2(*args, **kwargs)
return self.client
def assert_called(self, method, url, body=None, pos=-1):
        expected = (method, url)
        # Check for an empty callstack before indexing into it, so the intended
        # assertion message surfaces instead of an IndexError.
        assert self.client.callstack, ("Expected %s %s but no calls "
                                       "were made." % expected)
        called = self.client.callstack[pos][0:2]
assert expected == called, 'Expected %s %s; got %s %s' % (expected +
called)
if body is not None:
assert self.client.callstack[pos][2] == body
fake_client_factory = FakeClientFactory()
fake_client_v2_factory = FakeClientV2Factory()
@mock.patch.object(cinder_client_v1, 'Client', fake_client_factory)
class CinderTestCase(test.NoDBTestCase):
"""Test case for cinder volume v1 api."""
def setUp(self):
super(CinderTestCase, self).setUp()
catalog = [{
"type": "volume",
"name": "cinder",
"endpoints": [{"publicURL": "http://localhost:8776/v1/project_id"}]
}]
self.context = context.RequestContext('username', 'project_id',
service_catalog=catalog)
cinder.cinderclient(self.context)
self.api = cinder.API()
def assert_called(self, *args, **kwargs):
fake_client_factory.assert_called(*args, **kwargs)
def test_context_with_catalog(self):
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEqual(
fake_client_factory.client.client.management_url,
'http://localhost:8776/v1/project_id')
def test_cinder_endpoint_template(self):
self.flags(
endpoint_template='http://other_host:8776/v1/%(project_id)s',
group='cinder'
)
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEqual(
fake_client_factory.client.client.management_url,
'http://other_host:8776/v1/project_id')
def test_get_non_existing_volume(self):
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
volume = self.api.get(self.context, '5678')
self.assert_called('GET', '/volumes/5678')
self.assertIn('volume_image_metadata', volume)
self.assertEqual(volume['volume_image_metadata'], _image_metadata)
def test_cinder_api_insecure(self):
# The True/False negation is awkward, but better for the client
# to pass us insecure=True and we check verify_cert == False
self.flags(api_insecure=True, group='cinder')
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEqual(
fake_client_factory.client.client.verify_cert, False)
def test_cinder_api_cacert_file(self):
cacert = "/etc/ssl/certs/ca-certificates.crt"
self.flags(ca_certificates_file=cacert, group='cinder')
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEqual(
fake_client_factory.client.client.verify_cert, cacert)
def test_cinder_http_retries(self):
retries = 42
self.flags(http_retries=retries, group='cinder')
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEqual(
fake_client_factory.client.client.retries, retries)
@mock.patch.object(cinder_client_v2, 'Client', fake_client_v2_factory)
class CinderV2TestCase(test.NoDBTestCase):
"""Test case for cinder volume v2 api."""
def setUp(self):
super(CinderV2TestCase, self).setUp()
catalog = [{
"type": "volumev2",
"name": "cinderv2",
"endpoints": [{"publicURL": "http://localhost:8776/v2/project_id"}]
}]
cinder.CONF.set_override('catalog_info',
'volumev2:cinder:publicURL', group='cinder')
self.context = context.RequestContext('username', 'project_id',
service_catalog=catalog)
cinder.cinderclient(self.context)
self.api = cinder.API()
def tearDown(self):
cinder.CONF.reset()
super(CinderV2TestCase, self).tearDown()
def assert_called(self, *args, **kwargs):
fake_client_v2_factory.assert_called(*args, **kwargs)
def test_context_with_catalog(self):
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEqual(
'http://localhost:8776/v2/project_id',
fake_client_v2_factory.client.client.management_url)
def test_cinder_endpoint_template(self):
self.flags(
endpoint_template='http://other_host:8776/v2/%(project_id)s',
group='cinder'
)
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEqual(
'http://other_host:8776/v2/project_id',
fake_client_v2_factory.client.client.management_url)
def test_get_non_existing_volume(self):
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
volume = self.api.get(self.context, '5678')
self.assert_called('GET', '/volumes/5678')
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
def test_cinder_api_insecure(self):
# The True/False negation is awkward, but better for the client
# to pass us insecure=True and we check verify_cert == False
self.flags(api_insecure=True, group='cinder')
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertFalse(fake_client_v2_factory.client.client.verify_cert)
def test_cinder_api_cacert_file(self):
cacert = "/etc/ssl/certs/ca-certificates.crt"
self.flags(ca_certificates_file=cacert, group='cinder')
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEqual(cacert,
fake_client_v2_factory.client.client.verify_cert)
def test_cinder_http_retries(self):
retries = 42
self.flags(http_retries=retries, group='cinder')
self.api.get(self.context, '1234')
self.assert_called('GET', '/volumes/1234')
self.assertEqual(retries, fake_client_v2_factory.client.client.retries)
def test_cinder_http_timeout(self):
timeout = 123
self.flags(http_timeout=timeout, group='cinder')
self.api.get(self.context, '1234')
self.assertEqual(timeout,
fake_client_v2_factory.client.client.timeout)
|
|
from ipaddress import ip_network
from bgpfu.prefixlist import PrefixSet
def test_prefixset_init_empty():
ps = PrefixSet()
assert ps is not None
assert len(ps) == 0
def test_prefixset_init_meta():
value = "test_value"
ps = PrefixSet(meta=value)
assert ps.meta("meta") == value
assert ps.meta("atem") is None
try:
ps.meta("atem", strict=True)
except Exception as e:
assert isinstance(e, KeyError)
assert isinstance(ps.meta(), dict) and len(ps.meta()) == 1
def test_prefixset_init_dict():
data4 = {"ipv4": [{"prefix": "10.0.0.0/8", "greater-equal": 16, "less-equal": 24}]}
data6 = {
"ipv6": [{"prefix": "2001:db8::/32", "greater-equal": 48, "less-equal": 64}]
}
data = dict()
data.update(data4)
data.update(data6)
dicts = (data4, data6, data)
for d in dicts:
ps = PrefixSet(d)
assert ps
def test_prefixset_init_str():
strings = ["10.0.0.0/8^16-24", "2001:db8::/32^48-64"]
for s in strings:
ps = PrefixSet(s)
assert ps
def test_prefix_set_init_str_invalid():
strings = ["10.0.0.1/8", "2000:g::/32", "10.0.0.0/8^8", ""]
for s in strings:
try:
ps = PrefixSet(s)
except Exception as e:
assert isinstance(e, ValueError)
def test_prefixset_init_dict_invalid_af():
dicts = [
{"ipv5": [{"prefix": "10.0.0.0/8"}]},
{"opv6": [{"prefix": "2001:db8::/32"}]},
]
for d in dicts:
ps = PrefixSet(d)
assert len(ps) == 0
def test_prefixset_init_dict_invalid_prefix():
dicts = [
{"ipv4": [{"prefix": "10.0.0.1/8"}]},
{"ipv6": [{"prefix": "2001:g::/32"}]},
]
for d in dicts:
ps = PrefixSet(d)
assert len(ps) == 0
def test_prefixset_init_dict_af_mismatch():
dicts = [
{"ipv6": [{"prefix": "10.0.0.0/8", "greater-equal": 16, "less-equal": 24}]},
{"ipv4": [{"prefix": "2001:db8::/32", "greater-equal": 48, "less-equal": 64}]},
]
for d in dicts:
ps = PrefixSet(d)
assert len(ps) == 0
def test_prefixset_init_min_length_invalid():
dicts = [
{"ipv4": [{"prefix": "10.0.0.0/16", "greater-equal": 8, "less-equal": 16}]},
{"ipv6": [{"prefix": "2001:db8::/48", "greater-equal": 32, "less-equal": 48}]},
]
for d in dicts:
ps = PrefixSet(d)
assert len(ps) == 1
def test_prefixset_init_max_length_invalid():
dicts = [
{"ipv4": [{"prefix": "10.0.0.0/32", "greater-equal": 32, "less-equal": 48}]},
{
"ipv6": [
{"prefix": "2001:db8::/128", "greater-equal": 128, "less-equal": 256}
]
},
]
for d in dicts:
ps = PrefixSet(d)
assert len(ps) == 1
def test_prefixset_init_length_missing():
dicts = [
{"ipv4": [{"prefix": "10.0.0.0/8"}]},
{"ipv6": [{"prefix": "2001:db8::/32"}]},
]
for d in dicts:
ps = PrefixSet(d)
assert len(ps) == 1
def test_prefixset_init_min_length_missing():
dicts = [
{"ipv4": [{"prefix": "10.0.0.0/8", "less-equal": 8}]},
{"ipv6": [{"prefix": "2001:db8::/32", "less-equal": 32}]},
]
for d in dicts:
ps = PrefixSet(d)
assert len(ps) == 1
def test_prefixset_init_max_length_missing():
dicts = [
{"ipv4": [{"prefix": "10.0.0.0/30", "greater-equal": 31}]},
{"ipv6": [{"prefix": "2001:db8::/126", "greater-equal": 127}]},
]
for d in dicts:
ps = PrefixSet(d)
assert len(ps) == 6
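# Worked example (illustrative, not part of the original suite): a /30 with
# greater-equal 31 and no upper bound covers every /31 and /32 inside it,
# i.e. 2 + 4 == 6 prefixes, which is the length asserted above.
def _example_max_length_missing_count():
    assert 2 ** (31 - 30) + 2 ** (32 - 30) == 6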
def test_prefixset_len():
p4, l4, m4, n4 = "10.0.0.0", 8, 16, 24
prefix4 = "%s/%d" % (p4, l4)
p6, l6, m6, n6 = "2001:db8::", 32, 40, 48
prefix6 = "%s/%d" % (p6, l6)
e_count = 2 ** (n4 - l4 + 1) + 2 ** (n6 - l6 + 1) - 2 ** (m4 - l4) - 2 ** (m6 - l6)
data = {
"ipv4": [{"prefix": prefix4, "greater-equal": m4, "less-equal": n4}],
"ipv6": [{"prefix": prefix6, "greater-equal": m6, "less-equal": n6}],
}
ps = PrefixSet(data)
r_count = len(ps)
assert e_count == r_count != 0
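# Worked example (illustrative, not part of the original suite): the expected
# count formula above is the closed form of a geometric sum. For the IPv4 part,
# a /8 expanded to lengths 16-24 contains sum_{k=16..24} 2**(k-8) prefixes.
def _example_prefix_count_arithmetic():
    l4, m4, n4 = 8, 16, 24
    brute_force = sum(2 ** (k - l4) for k in range(m4, n4 + 1))
    closed_form = 2 ** (n4 - l4 + 1) - 2 ** (m4 - l4)
    assert brute_force == closed_form == 130816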
def test_prefixset_iter_prefixes():
strings = ["10.0.0.0/8", "2001:db8::/32"]
for s in strings:
ps = PrefixSet(s)
assert list(ps.prefixes()) == [ip_network(s)]
def test_prefixset_contains_prefix():
strings = ["10.0.0.0/8", "2001:db8::/32"]
for s in strings:
ps = PrefixSet(s)
assert ip_network(s) in ps
def test_prefixset_intersection():
tuples = [
("10.0.0.0/8^16-24", "10.0.0.0/20"),
("2001:db8::/32^48-64", "2001:db8::/56"),
]
for s1, s2 in tuples:
ps1 = PrefixSet(s1)
ps2 = PrefixSet(s2)
assert list((ps1 & ps2).prefixes()) == [ip_network(s2)]
def test_prefixset_union():
tuples = [("10.0.0.0/16", "10.1.0.0/16"), ("2001:db8::/48", "2001:db8:ffff::/48")]
for s1, s2 in tuples:
ps1 = PrefixSet(s1)
ps2 = PrefixSet(s2)
assert set((ps1 | ps2).prefixes()) == {ip_network(s1), ip_network(s2)}
def test_prefixset_data_no_aggr():
data = {"ipv4": [{"prefix": "10.0.0.0/8"}], "ipv6": [{"prefix": "2001:db8::/32"}]}
ps = PrefixSet(data)
assert ps.data(aggregate=False) == data
def no_test_prefixset_data_aggr():
"""Test broken from py3 conversion."""
pre_data = {
"ipv4": [
{"prefix": "10.0.0.0/9"},
{"prefix": "10.128.0.0/9"},
{"prefix": "10.0.0.0/10"},
{"prefix": "10.64.0.0/10"},
{"prefix": "10.128.0.0/10"},
{"prefix": "10.192.0.0/10"},
],
"ipv6": [],
}
post_data = {
"ipv4": [{"prefix": "10.0.0.0/8", "greater-equal": 9, "less-equal": 10}],
"ipv6": [],
}
ps = PrefixSet(pre_data)
assert ps.data(aggregate=True) == post_data
if __name__ == "__main__":
    no_test_prefixset_data_aggr()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
# pylint:disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils
from tensorflow.python.keras.engine import training_utils_v1
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def set_weights(distribution_strategy, dist_model, weights):
"""Sets the weights of the replicated models.
The weights of the replicated models are set to the weights of the original
model. The weights of the replicated model are Mirrored variables and hence
we need to use the `update` call within a DistributionStrategy scope.
Args:
distribution_strategy: DistributionStrategy used to distribute training
and validation.
dist_model: The replicated models on the different devices.
weights: The weights of the original model.
"""
assign_ops = []
for layer in dist_model.layers:
num_param = len(layer.weights)
layer_weights = weights[:num_param]
for sw, w in zip(layer.weights, layer_weights):
if ops.executing_eagerly_outside_functions():
sw.assign(w)
else:
assign_ops.append(distribution_strategy.unwrap(sw.assign(w)))
weights = weights[num_param:]
if not ops.executing_eagerly_outside_functions():
K.get_session(assign_ops).run(assign_ops)
def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs,
grouped_updates=None, grouped_session_args=None,
with_loss_tensor=False):
"""Unwrap the list of values contained in the PerReplica parameters.
This function calls `flatten_per_replica_values` to parse each of the input
parameters into a list of values on the different devices. If we set
`with_loss_tensor` to be True, we also call `reduce` on the list of losses on
the different devices to give us one loss tensor.
Args:
distribution_strategy: DistributionStrategy used to distribute training and
validation.
grouped_inputs: PerReplica inputs returned from the train or test function
that we ran on each device.
grouped_outputs: PerReplica outputs returned from the train or test function
that we ran on each device.
grouped_updates: PerReplica updates returned from the train or test function
that we ran on each device.
grouped_session_args: PerReplica session args returned from the train or
test function that we ran on each device.
with_loss_tensor: Boolean that indicates if we need to add the reduced loss
tensor as one of the outputs.
Returns:
Values of each of the PerReplica parameters.
"""
# Unwrap per device values returned from each model's train function.
# This will be used to construct the main train function.
all_inputs = flatten_per_replica_values(distribution_strategy,
grouped_inputs)
all_outputs = unwrap_outputs(distribution_strategy, grouped_outputs,
with_loss_tensor)
if grouped_updates:
all_updates = flatten_per_replica_values(distribution_strategy,
grouped_updates)
else:
all_updates = None
all_session_args = {}
if grouped_session_args:
grouped_feed_dict = grouped_session_args.get('feed_dict')
if grouped_feed_dict:
all_session_args['feed_dict'] = flatten_per_replica_values(
distribution_strategy, grouped_feed_dict)
grouped_fetches = grouped_session_args.get('fetches')
if grouped_fetches:
all_session_args['fetches'] = flatten_per_replica_values(
distribution_strategy, grouped_fetches)
# TODO(priyag): Return only non empty/None values
return all_inputs, all_outputs, all_updates, all_session_args
def unwrap_output_dict(strategy, grouped_outputs, mode):
"""Unwrap the list of outputs contained in the PerReplica parameters."""
if mode == ModeKeys.PREDICT:
return flatten_per_replica_values(strategy, grouped_outputs)
  # In the case of fit/eval the grouped_outputs is a dict, whereas in predict
  # the output has the same structure as the model output, so the two cases
  # need to be treated differently.
total_loss = strategy.reduce(reduce_util.ReduceOp.SUM,
grouped_outputs['total_loss'][0], axis=None)
output_losses = flatten_per_replica_values(strategy,
grouped_outputs['output_losses'])
metrics = flatten_per_replica_values(strategy,
grouped_outputs['metrics'])
batch_size = strategy.reduce(reduce_util.ReduceOp.SUM,
grouped_outputs['batch_size'], axis=None)
if (dist_utils.is_tpu_strategy(strategy) and
ops.executing_eagerly_outside_functions()):
# Choose 1 value per replica in the TPU case since all replicas produce the
# same output.
    # We only do this in eager mode for now, since this function is used in
    # both graph and eager mode; in the graph case we currently don't use
    # experimental_run, so this special-casing would need to be removed once
    # the graph code path is converged as well.
output_losses = output_losses[::strategy.num_replicas_in_sync]
metrics = metrics[::strategy.num_replicas_in_sync]
return {'total_loss': [total_loss],
'output_losses': output_losses,
'metrics': metrics,
'batch_size': batch_size}
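# Illustrative sketch, not part of the original module: why the TPU branch
# above slices with a stride of num_replicas_in_sync. flatten_per_replica_values
# lays values out output-major (all replica copies of output 0, then output 1,
# and so on), so a [::num_replicas] stride keeps one copy per logical output.
# The strings below are placeholders.
def _example_per_replica_stride(num_replicas=2):
  flattened = ['loss_r0', 'loss_r1', 'acc_r0', 'acc_r1']
  assert flattened[::num_replicas] == ['loss_r0', 'acc_r0']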
def unwrap_outputs(distribution_strategy, grouped_outputs,
with_loss_tensor=False):
"""Unwrap the list of outputs contained in the PerReplica parameters.
This function calls `flatten_per_replica_values` to parse each of the input
parameters into a list of outputs on the different devices. If we set
`with_loss_tensor` to be True, we also call `reduce` on the list of losses on
the different devices to give us one loss tensor.
Args:
distribution_strategy: DistributionStrategy used to distribute training and
validation.
grouped_outputs: PerReplica outputs returned from the train or test function
that we ran on each device.
with_loss_tensor: Boolean that indicates if we need to add the reduced loss
tensor as one of the outputs.
Returns:
Values of each of the PerReplica outputs.
"""
if not with_loss_tensor:
return flatten_per_replica_values(distribution_strategy,
grouped_outputs)
if not isinstance(grouped_outputs, list):
grouped_outputs = [grouped_outputs]
# reduce loss tensor before adding it to the list of fetches
loss = distribution_strategy.reduce(reduce_util.ReduceOp.SUM,
grouped_outputs[0], axis=None)
all_outputs = flatten_per_replica_values(distribution_strategy,
grouped_outputs[1:])
if (dist_utils.is_tpu_strategy(distribution_strategy) and
ops.executing_eagerly_outside_functions()):
# Choose 1 value per replica in the TPU case since all replicas produce the
# same output.
    # We only do this in eager mode for now, since this function is used in
    # both graph and eager mode; in the graph case we currently don't use
    # experimental_run, so this special-casing would need to be removed once
    # the graph code path is converged as well.
all_outputs = all_outputs[::distribution_strategy.num_replicas_in_sync]
return [loss] + all_outputs
def flatten_per_replica_values(distribution_strategy, per_replica_values):
"""Unwraps and flattens a nest of PerReplica parameters.
PerReplica values have one value associated with each device. Each entry in
the PerReplica dict has a device `key` and the corresponding value on the
device as the `value`. In this function we take a PerReplica value or a list
of PerReplica values and return all the values in the PerReplica dict.
Args:
distribution_strategy: DistributionStrategy used to distribute training and
validation.
per_replica_values: List of PerReplica object or a single PerReplica object.
Returns:
List of values of all the PerReplica objects.
"""
# pylint: disable=g-complex-comprehension
# This function takes a PerReplica object or a list of PerReplica objects and
# returns all the values associated with it.
return [e for flattened in nest.flatten(per_replica_values)
for e in distribution_strategy.unwrap(flattened)]
def validate_callbacks(input_callbacks, optimizer):
"""Validate whether given callbacks are supported by DistributionStrategy.
Args:
input_callbacks: List of callbacks passed by the user to fit.
optimizer: Optimizer instance used to train the model.
  Raises:
    ValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is one of the
      callbacks passed and the optimizer is not a Keras Optimizer V2 instance.
      Note that `write_grads` on a TensorBoard callback does not raise; it is
      disabled with a warning instead.
"""
if input_callbacks:
for callback in input_callbacks:
if isinstance(callback, (callbacks.LearningRateScheduler,
callbacks.ReduceLROnPlateau)):
if not isinstance(optimizer, optimizer_v2.OptimizerV2):
raise ValueError('You must specify a Keras Optimizer V2 when using '
'%s callback with DistributionStrategy.' % callback)
# If users want to use the TensorBoard callback they cannot use certain
# features of the callback that involve accessing model attributes and
# running ops.
if isinstance(callback, callbacks.TensorBoard):
if getattr(callback, 'write_grads', False):
logging.warning(
UserWarning(
'`write_grads` in the TensorBoard callback is not supported '
'when using DistributionStrategy. Setting `write_grads` '
'to `False`.'))
callback.write_grads = False
def validate_distributed_dataset_inputs(distribution_strategy, x, y,
sample_weights=None):
"""Validate all the components of a DistributedValue Dataset input.
Args:
distribution_strategy: The current DistributionStrategy used to call
`fit`/`evaluate`.
x: Input Dataset DistributedValue object. For example, when we use
`MirroredStrategy` this is a PerReplica object with a tensor for each
device set in the dict. x can also be a tuple or dict. The keys of the
dict should match the names of the input layers of the model.
y: Target Dataset DistributedValue object. For example, when we use
`MirroredStrategy` this is a PerReplica object with a tensor for each
device set in the dict. y can also be a tuple or dict. The keys of the
dict should match the names of the output layers of the model.
sample_weights: Sample weights Dataset DistributedValue object. For example,
when we use `MirroredStrategy` this is a PerReplica object with a tensor
for each device set in the dict.
Returns:
The unwrapped values list of the x and y DistributedValues inputs.
  Raises:
    ValueError: If x and y cannot be evaluated as tensors, if they contain
    elements that are not tensors, or if those elements have a shape or dtype
    mismatch.
"""
# If the input and target used to call the model are not dataset tensors,
# we need to raise an error. When using a DistributionStrategy, the input
# and targets to a model should be from a `tf.data.Dataset`.
# If each element of x and y are not tensors, we cannot standardize and
# validate the input and targets.
x_values_list = validate_per_replica_inputs(distribution_strategy, x)
if y is not None:
y_values_list = validate_per_replica_inputs(distribution_strategy, y)
else:
y_values_list = None
if sample_weights is not None:
sample_weights_list = validate_per_replica_inputs(distribution_strategy,
sample_weights)
else:
sample_weights_list = None
# Return the unwrapped values to avoid calling `unwrap` a second time.
return x_values_list, y_values_list, sample_weights_list
def validate_per_replica_inputs(distribution_strategy, x):
"""Validates PerReplica dataset input list.
Args:
distribution_strategy: The current DistributionStrategy used to call
`fit`, `evaluate` and `predict`.
x: A list of PerReplica objects that represent the input or
target values.
Returns:
List containing the first element of each of the PerReplica objects in
the input list.
Raises:
ValueError: If any of the objects in the `per_replica_list` is not a tensor.
"""
# Convert the inputs and targets into a list of PerReplica objects.
per_replica_list = nest.flatten(x, expand_composites=True)
x_values_list = []
for x in per_replica_list:
# At this point x should contain only tensors.
x_values = distribution_strategy.unwrap(x)
for value in x_values:
if not tensor_util.is_tensor(value):
raise ValueError('Dataset input to the model should be tensors instead '
'they are of type {}'.format(type(value)))
if not context.executing_eagerly():
# Validate that the shape and dtype of all the elements in x are the same.
validate_all_tensor_shapes(x, x_values)
validate_all_tensor_types(x, x_values)
x_values_list.append(x_values[0])
return x_values_list
def validate_all_tensor_types(x, x_values):
x_dtype = x_values[0].dtype
for i in range(1, len(x_values)):
if x_dtype != x_values[i].dtype:
raise ValueError('Input tensor dtypes do not match for distributed tensor'
' inputs {}'.format(x))
def validate_all_tensor_shapes(x, x_values):
# Validate that the shape of all the elements in x have the same shape
x_shape = x_values[0].shape.as_list()
for i in range(1, len(x_values)):
if x_shape != x_values[i].shape.as_list():
raise ValueError('Input tensor shapes do not match for distributed tensor'
' inputs {}'.format(x))
def _wait_for_variable_initialization(session):
"""Utility to wait for variables to be initialized."""
all_variables = K._get_variables(K.get_graph()) # pylint: disable=protected-access
candidate_vars = []
for v in all_variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if not candidate_vars:
return
while True:
is_initialized = session.run(
[variables.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True # pylint: disable=protected-access
if not uninitialized_vars:
break
def init_restore_or_wait_for_variables():
"""Initialize or restore variables or wait for variables to be initialized."""
session = K._get_session() # pylint: disable=protected-access
if not multi_worker_util.has_worker_context(
) or multi_worker_util.should_load_checkpoint():
# TODO(yuefengz): if checkpoints exist, restore from checkpoint.
K._initialize_variables(session) # pylint: disable=protected-access
else:
_wait_for_variable_initialization(session)
def validate_inputs(x, y):
"""Validate inputs when using DistributionStrategy.
Args:
x: Model Inputs.
y: Model Targets.
Raises:
ValueError: if input is not a Dataset or a numpy array(when we use
MirroredStrategy).
"""
if (isinstance(x, iterator_ops.Iterator) or
isinstance(y, iterator_ops.Iterator)):
raise ValueError('`DistributionStrategy` does not support inputs of type '
'Iterator. You must pass a `tf.data.Dataset` object or a '
'numpy array as input.')
def is_dataset_shape_fully_defined(dataset):
"""Returns whether a dataset contains a final partial batch."""
shapes = nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))
unknown_shapes = [s for s in shapes if not s.is_fully_defined()]
return not unknown_shapes
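# Illustrative sketch (assumes a working TensorFlow runtime; not part of the
# original module): batching without drop_remainder leaves the batch dimension
# unknown, so the helper above reports the shapes as not fully defined.
def _example_dataset_shape_fully_defined():
  ragged_batches = dataset_ops.DatasetV2.range(10).batch(3)
  full_batches = dataset_ops.DatasetV2.range(10).batch(3, drop_remainder=True)
  assert not is_dataset_shape_fully_defined(ragged_batches)
  assert is_dataset_shape_fully_defined(full_batches)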
def process_batch_and_step_size(strategy,
inputs,
batch_size,
steps_per_epoch,
mode,
validation_split=0.):
"""Process the batch size and step size based on input and dist strategy."""
first_x_value = nest.flatten(inputs)[0]
if isinstance(first_x_value, np.ndarray):
num_samples = first_x_value.shape[0]
if validation_split and 0. < validation_split < 1.:
num_samples = int(num_samples * (1 - validation_split))
# Until support for partial batch is implemented across all
# functions and distribution strategy, we pass `mode` to selectively
# relax the constraint to consume all the training samples.
steps_per_epoch, batch_size = get_input_params(
strategy, num_samples, steps_per_epoch, batch_size, mode=mode)
return batch_size, steps_per_epoch
def get_input_params(distribution_strategy,
num_samples,
steps,
batch_size,
mode=None):
"""Calculate the number of batches and steps/steps_per_epoch.
Args:
distribution_strategy: The DistributionStrategy used to compile the model.
num_samples: The number of samples from which we determine the batch size
and steps.
steps: The specified number of steps.
batch_size: The specified batch_size.
mode: ModeKey representing whether input will be used for training,
evaluation, or prediction. This is used to relax the constraints on
consuming all the training samples to keep compatibility till we support
partial batches. If none, then partial batches are not allowed.
Returns:
    steps: The steps or steps_per_epoch argument, depending on whether the user
      is calling `fit`, `evaluate` or `predict`. If partial batches are allowed
      for the given mode, the samples need not be consumed completely.
batch_size: The batch size to be used in model iterations.
Raises:
ValueError: If the number of batches or steps evaluates to 0.
"""
# TODO(b/118776054): Use global batch size for Keras/DS support.
# Currently this is only supported in TPUStrategy and CoreMirroredStrategy.
use_per_replica_batch = not dist_utils.global_batch_size_supported(
distribution_strategy)
# TODO(b/128995245): In eager mode, uneven batch sizes are allowed except for
# `fit()` on TPUStrategy.
# In graph mode, the zero batch case in batch norm is not handled due to
# XLA-GPU regression. Uneven batch sizes are not allowed except
# for `test()` and `predict()` on TPUStrategy.
if context.executing_eagerly():
allow_partial_batch = (
mode != ModeKeys.TRAIN or
not dist_utils.is_tpu_strategy(distribution_strategy))
else:
allow_partial_batch = (
mode == ModeKeys.TRAIN or
((mode == ModeKeys.PREDICT or mode == ModeKeys.TEST) and
dist_utils.is_tpu_strategy(distribution_strategy)))
if steps is None:
if batch_size is None:
# If neither the batch size or number of steps are set. We choose the
# global batch size as the minimum of number of samples and 32. 32 is
# chosen to provide backward compatibility.
global_batch_size = min(num_samples, 32)
else:
# If the user provided the batch size we need to handle the case
# between different strategies that use the global/per-replica batch size
global_batch_size = batch_size
if use_per_replica_batch:
global_batch_size *= distribution_strategy.num_replicas_in_sync
if allow_partial_batch:
steps = np.ceil(num_samples / global_batch_size).astype(int)
else:
if num_samples % global_batch_size:
raise ValueError('The number of samples %s is not divisible by '
'batch size %s.' % (num_samples, global_batch_size))
steps = num_samples // global_batch_size
else:
if batch_size is None:
# We calculate the batch size based on the number of steps specified
if num_samples % steps:
raise ValueError('The number of samples %s is not divisible by '
'steps %s. Please change the number of steps to a '
'value that can consume all the samples' % (
num_samples, steps))
global_batch_size = num_samples // steps
else:
# If the user provided the batch size we need to handle the case
# between different strategies that use the global/per-replica batch size
global_batch_size = batch_size
if use_per_replica_batch:
global_batch_size *= distribution_strategy.num_replicas_in_sync
min_num_samples = global_batch_size * steps
if allow_partial_batch:
min_num_samples = global_batch_size * (steps-1) + 1 if steps > 1 else 0
if num_samples < min_num_samples:
raise ValueError('Number of samples %s is less than samples required '
'for specified batch_size %s and steps %s' % (
num_samples, global_batch_size, steps))
# We need to return the per replica or global batch size based on the strategy
if use_per_replica_batch:
if global_batch_size % distribution_strategy.num_replicas_in_sync:
raise ValueError(
'The batch size (%s) could not be sharded evenly across the sync '
'replicas (%s) in the distribution strategy.' % (
global_batch_size, distribution_strategy.num_replicas_in_sync))
batch_size = global_batch_size // distribution_strategy.num_replicas_in_sync
else:
batch_size = global_batch_size
return steps, batch_size
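# Worked example (illustrative only; the numbers are hypothetical and the real
# computation above also folds in the strategy's replica count): with
# num_samples=1000 and neither steps nor batch_size given, the global batch
# size defaults to min(1000, 32) == 32 and, when partial batches are allowed,
# steps becomes ceil(1000 / 32) == 32. When partial batches are not allowed,
# 1000 % 32 != 0 raises a ValueError instead.
def _example_get_input_params_arithmetic():
  num_samples = 1000
  global_batch_size = min(num_samples, 32)
  steps = int(np.ceil(num_samples / global_batch_size))
  assert (global_batch_size, steps) == (32, 32)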
def get_batch_dimension(iterator):
shapes = nest.flatten(dataset_ops.get_legacy_output_shapes(iterator))
# Take the batch size from the first element, as it should be the same for
# all.
dims = shapes[0].dims
return dims[0] if dims else None
def get_iterator(dataset, distribution_strategy):
with distribution_strategy.scope():
iterator = distribution_strategy.make_dataset_iterator(dataset)
initialize_iterator(iterator, distribution_strategy)
return iterator
def initialize_iterator(iterator, distribution_strategy):
with distribution_strategy.scope():
init_op = control_flow_ops.group(iterator.initializer)
if not context.executing_eagerly():
K.get_session((init_op,)).run(init_op)
def _get_input_from_iterator(iterator, model):
"""Get elements from the iterator and verify the input shape and type."""
next_element = iterator.get_next()
  # `len(nest.flatten(x))` does not count empty elements such as {}:
  # len(nest.flatten([[0, 1, 2], {}])) is 3, not 4. `next_element` gets
  # flattened in `_prepare_feed_values` to work around that, and empty
  # elements are filtered out as part of the flattening.
if len(nest.flatten(next_element)) == len(model.inputs):
x = next_element
y = None
sample_weights = None
elif len(nest.flatten(next_element)) == (len(model.inputs) +
len(model.outputs)):
x, y = next_element
sample_weights = None
else:
x, y, sample_weights = next_element
# Validate that all the elements in x and y are of the same type and shape.
validate_distributed_dataset_inputs(
model._distribution_strategy, x, y, sample_weights)
return x, y, sample_weights
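# Illustrative check (mirrors the comment above; not part of the original
# module): nest.flatten drops empty structures, so a {} placeholder does not
# add to the element count that _get_input_from_iterator compares against
# len(model.inputs).
def _example_nest_flatten_drops_empty():
  assert len(nest.flatten([[0, 1, 2], {}])) == 3
  assert len(nest.flatten([[0, 1, 2], {'a': 4}])) == 4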
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
"""Prepare feed values to the model execution function.
Arguments:
model: Model to prepare feed values for.
inputs: List or dict of model inputs.
targets: Optional list of model targets.
sample_weights: Optional list of sample weight arrays.
mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
Returns:
Feed values for the model in the given mode.
"""
strategy = model._distribution_strategy
inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)
if dist_utils.is_tpu_strategy(strategy):
if sample_weights is not None:
raise ValueError('TPUStrategy does not support sample weights.')
# When the inputs are dict, then we want to flatten it in the same order as
# the input layers, such that the data are fed into the input layers in the
# correct order.
if isinstance(inputs, dict):
inputs = [inputs[key] for key in model._feed_input_names]
if is_distributing_by_cloning(model):
inputs = flatten_per_replica_values(strategy, inputs)
targets = flatten_per_replica_values(strategy, targets)
# Expand 1-dimensional inputs.
    # TODO(b/124535720): Remove once this standardize data logic is shared with
# main flow.
inputs, targets = nest.map_structure(
training_utils_v1.standardize_single_array, (inputs, targets))
else:
inputs = training_utils_v1.ModelInputs(inputs).as_list()
if mode == ModeKeys.PREDICT:
sample_weights = []
targets = []
elif sample_weights is not None and is_distributing_by_cloning(model):
if context.executing_eagerly() and not model._compile_distribution:
raise NotImplementedError('`sample_weight` is not supported when using '
'tf.distribute.Strategy in eager mode and '
'cloning=True.')
sample_weights = flatten_per_replica_values(strategy, sample_weights)
ins = [inputs, targets, sample_weights]
return tuple(ins)
def is_distributing_by_cloning(model):
"""Decide whether this model is going to be distributed via cloning.
We are going to distribute the model by cloning in graph mode.
Args:
model: Keras model to distribute.
Returns:
True if the `model` is going to be distributed using cloning and False
otherwise.
"""
if (dist_utils.is_tpu_strategy(model._distribution_strategy) and
context.executing_eagerly): # b/137580852
return False
elif ops.executing_eagerly_outside_functions():
return bool(model._compile_distribution)
return True
def _custom_compile_for_predict(model):
"""Custom compile for TPU predict mode."""
if not model.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
model._is_compiled = True
model.total_loss = None
model.train_function = None
model.test_function = None
model.predict_function = None
def _build_network_on_replica(model, mode, inputs=None, targets=None):
"""Build an updated model on replicas.
We create a new Keras model while sharing the variables from the old graph.
Building a new sub-graph is required since the original keras model creates
placeholders for the input and the output that are not accessible till we
call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.
  The sharing of weights and layers between the old and the new model guarantees
  that we're using Strategy variables and that any updates on either model are
  reflected correctly in callbacks and loop iterations.
We need to make sure we share the optimizers between the old and the new model
as well so that optimizer state is not lost if the user is running fit
multiple times.
Args:
model: Model to be replicated across Replicas
mode: Which of fit/eval/predict is building the distributed network
inputs: Input variables to be passed to the model
targets: Target tensor to be passed to model.compile
Returns:
A new model with shared layers with the old model.
"""
# Need to do imports here since we run into a circular dependency error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.engine import sequential # pylint: disable=g-import-not-at-top
  # We rely on the internal clone methods so that the `share_weights` layer_fn
  # does not have to be exposed in the public API.
if isinstance(model, sequential.Sequential):
updated_model = models._clone_sequential_model(
model, input_tensors=inputs, layer_fn=models.share_weights)
else:
updated_model = models._clone_functional_model(
model, input_tensors=inputs, layer_fn=models.share_weights)
# Callable losses added directly to a functional Model need to be added
# here.
updated_model._callable_losses = model._callable_losses
  # Recast all low precision outputs back to float32 since we only cast
  # the inputs to bfloat16 and not the targets. This is done so that we can
  # preserve precision when calculating the loss value.
def _upcast_low_precision_outputs(output):
if output.dtype == dtypes.bfloat16:
return math_ops.cast(output, dtypes.float32)
else:
return output
updated_model.outputs = [_upcast_low_precision_outputs(o)
for o in updated_model.outputs]
if isinstance(targets, tuple):
targets = nest.flatten(targets)
if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case
_custom_compile_for_predict(updated_model)
else:
updated_model.compile(
model.optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics),
target_tensors=targets)
return updated_model
def _build_distributed_network(model, strategy, mode, inputs=None,
targets=None):
"""Create a cloned model on each replica."""
with K.get_graph().as_default(), strategy.scope():
distributed_model = strategy.extended.call_for_each_replica(
_build_network_on_replica,
args=(model, mode, inputs, targets))
set_distributed_model(model, mode, distributed_model)
def _clone_and_build_model(model, mode, inputs=None, targets=None):
"""Clone and build the given keras_model."""
# We need to set the import here since we run into a circular dependency
# error.
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
cloned_model = models.clone_model(model, input_tensors=inputs)
# Compile and build model.
if isinstance(model.optimizer, optimizers.TFOptimizer):
optimizer = model.optimizer
else:
optimizer_config = model.optimizer.get_config()
optimizer = model.optimizer.__class__.from_config(optimizer_config)
  # Recast all low precision outputs back to float32 since we only cast
  # the inputs to bfloat16 and not the targets. This is done so that we can
  # preserve precision when calculating the loss value.
def _upcast_low_precision_outputs(output):
if output.dtype == dtypes.bfloat16:
return math_ops.cast(output, dtypes.float32)
else:
return output
cloned_model.outputs = [_upcast_low_precision_outputs(o)
for o in cloned_model.outputs]
if isinstance(targets, tuple):
targets = nest.flatten(targets)
if mode == ModeKeys.PREDICT and inputs is not None: # TPU predict case
_custom_compile_for_predict(cloned_model)
else:
cloned_model.compile(
optimizer,
model.loss,
metrics=metrics_module.clone_metrics(model._compile_metrics),
loss_weights=model.loss_weights,
sample_weight_mode=model.sample_weight_mode,
weighted_metrics=metrics_module.clone_metrics(
model._compile_weighted_metrics),
target_tensors=targets)
return cloned_model
def clone_model_on_replicas(model, strategy, mode, inputs=None, targets=None):
"""Create a cloned model on each replica."""
with K.get_graph().as_default(), strategy.scope():
distributed_model = strategy.extended.call_for_each_replica(
_clone_and_build_model, args=(model, mode, inputs, targets))
set_distributed_model(model, mode, distributed_model)
if mode == ModeKeys.TRAIN:
model._make_callback_model(distributed_model)
def _make_execution_function(model, mode):
"""Makes or reuses function to run one step of distributed model execution."""
if is_distributing_by_cloning(model):
return _make_execution_function_with_cloning(model, mode)
distributed_function = get_distributed_function(model, mode)
if distributed_function:
return distributed_function
distribution_function = _make_execution_function_without_cloning(model, mode)
set_distributed_function(model, mode, distribution_function)
return distribution_function
def _make_execution_function_without_cloning(model, mode):
"""Creates a function to run one step of distributed model execution."""
strategy = model._distribution_strategy
with strategy.scope():
per_replica_function = _make_replica_execution_function(model, mode)
def distributed_function(input_fn):
"""A single step of the distributed execution across replicas."""
x, y, sample_weights = input_fn()
# Call `Model.{train,test,predict}_on_batch` on every replica passing
# PerReplicas as arguments. On every replica inside this call, each
# PerReplica object will return the value for that replica. The outputs
# are PerReplicas too.
outputs = strategy.run(per_replica_function, args=(x, y, sample_weights))
# Out of PerReplica outputs reduce or pick values to return.
all_outputs = unwrap_outputs(
strategy, outputs, with_loss_tensor=(mode != ModeKeys.PREDICT))
return all_outputs
if not model.run_eagerly:
distributed_function = def_function.function(distributed_function)
def execution_function(input_fn):
# `numpy` translates Tensors to values in Eager mode.
return [out.numpy() for out in distributed_function(input_fn)]
else:
execution_function = distributed_function
return execution_function
def _make_replica_execution_function(model, mode):
"""A single step of the distributed execution on a replica."""
if mode == ModeKeys.TRAIN:
func = model.train_on_batch
elif mode == ModeKeys.TEST:
func = model.test_on_batch
else:
def predict_on_batch(x, y=None, sample_weights=None):
del y, sample_weights
return model.predict_on_batch(x)
func = predict_on_batch
if mode != ModeKeys.PREDICT:
# `reset_metrics` is set to False to maintain stateful metrics across
# batch-level calls.
func = functools.partial(func, reset_metrics=False)
return func
def _make_replicated_models_with_cloning(model, mode):
"""Build models on each replica."""
strategy = model._distribution_strategy
# If distributed_model is not built, create one for `mode`.
if model._compile_distribution:
clone_model_on_replicas(model, strategy, mode)
else:
_build_distributed_network(model, strategy, mode)
def _make_execution_function_with_cloning(model, mode):
"""Clones or re-uses models to run one step of distributed model execution."""
distributed_model = get_distributed_model(model, mode)
  # TODO(b/134069401): Create a cache for the distributed model and exec
  # function that incorporates additional attributes into the cache key,
  # rather than just the mode.
  # If the distributed model for a particular `mode` is already built, reuse
  # the cached `_distributed_function` on that distributed model.
  # If you have updated the sample_weight_mode on the model, then you will need
  # to recompile metrics and recreate the execution function. This is indicated
  # by the `_recompile_exec_function` property.
  if (distributed_model and hasattr(distributed_model, '_distributed_function')
      and not (hasattr(distributed_model, '_recompile_exec_function') and
               distributed_model._recompile_exec_function)):
    return distributed_model._distributed_function
if not distributed_model:
_make_replicated_models_with_cloning(model, mode)
distributed_model = get_distributed_model(model, mode)
assert distributed_model
# Also create an execution function on that distributed model.
if context.executing_eagerly():
distributed_function = _make_eager_execution_function(model, mode)
else:
distributed_function = _make_graph_execution_function(model, mode)
# We cache the distributed execution function on the model since creating
# distributed models and execution functions are expensive.
distributed_model._distributed_function = distributed_function
distributed_model._recompile_exec_function = False
return distributed_function
def _make_graph_execution_function(model, mode):
"""Makes function to run one step of distributed model in graph mode."""
def _per_replica_function(model):
f = model._make_execution_function(mode)
return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)
strategy = model._distribution_strategy
with strategy.scope():
# Create train ops on each of the devices when we call
# `_per_replica_fit_function`.
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = strategy.extended.call_for_each_replica(
_per_replica_function, args=(get_distributed_model(model, mode),))
# Initialize the variables in the replicated model. This is necessary for
# multi-worker training because on some workers, initialization is not
# needed. This method does initialization or waiting for initialization
# according to the context object of distribute coordinator.
init_restore_or_wait_for_variables()
# Unwrap all the per device values returned from `call_for_each_replica`.
# Unwrapping per device values gives you a list of values that can be
# used to construct a new train function that is composed of update ops on
# all the devices over which the model is distributed.
(all_inputs, all_outputs, all_updates, all_session_args) = unwrap_values(
strategy,
grouped_inputs,
grouped_outputs,
grouped_updates,
grouped_session_args,
with_loss_tensor=(mode != ModeKeys.PREDICT))
return K.function(
all_inputs,
all_outputs,
updates=all_updates,
name='distributed_{}_function'.format(mode),
**all_session_args)
def _make_eager_execution_function(model, mode):
"""Makes function to run one step of distributed model eager execution."""
def _per_replica_function(model):
f = model._make_execution_function(mode)
return (f.inputs, f.outputs)
# NOTE(priyag): Try creating a new FuncGraph within DS scope instead of using
# the global one.
strategy = model._distribution_strategy
global_graph = K.get_graph()
with global_graph.as_default(), strategy.scope():
# First we gather the relevant portions of the model across all replicas.
# `K._scratch_graph(global_graph)` signals to Keras that it should not
# lift to a separate graph when creating the per-replica functions.
with K._scratch_graph(global_graph):
# Create train ops on each of the devices when we call
# `_per_replica_fit_function`.
grouped = strategy.extended.call_for_each_replica(
_per_replica_function, args=(get_distributed_model(model, mode),))
grouped_inputs, grouped_outputs = grouped
# Unwrap all the per device values returned from `call_for_each_replica`.
# Unwrapping per device values gives you a list of values that can be
# used to construct a new train function that is composed of
# inputs/outputs on all the devices over which the model is distributed.
(all_inputs, all_outputs, _, _) = unwrap_values(
strategy,
grouped_inputs,
grouped_outputs,
with_loss_tensor=(mode != ModeKeys.PREDICT))
# Finally, a joint Keras function is created; this one will be created in
# a separate FuncGraph.
return K.function(
all_inputs,
all_outputs,
name='eager_distributed_{}_function'.format(mode))
def _copy_weights_to_distributed_model(original_model, mode):
"""Copies weights from original model to distributed models."""
strategy = original_model._distribution_strategy
distributed_model = get_distributed_model(original_model, mode)
if strategy:
# Copy the weights from the original model to each of the replicated
# models.
orig_model_weights = original_model.get_weights()
first_model = strategy.unwrap(distributed_model)[0]
set_weights(strategy, first_model, orig_model_weights)
def _copy_weights_to_original_model(model, mode):
"""Copies weights from first distributed model back to original model."""
if model._distribution_strategy and mode == ModeKeys.TRAIN:
distributed_model = get_distributed_model(model, mode)
updated_weights = model._distribution_strategy.unwrap(
distributed_model)[0].get_weights()
model.set_weights(updated_weights)
def _per_replica_aggregate_batch(strategy, batch_outs, model, mode):
"""Aggregates the per-replica batch-level outputs from a distributed step."""
if strategy is not None and mode == ModeKeys.PREDICT:
total_batch_outs = []
for i in range(len(model.outputs)):
num_replicas = strategy.num_replicas_in_sync
nested_outs = batch_outs[i * num_replicas:i * num_replicas + num_replicas]
total_batch_outs.append(
concat_along_batch_dimension(nest.flatten(nested_outs)))
return total_batch_outs
return batch_outs
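# Illustrative sketch, not part of the original module: the slicing above
# regroups a flat list of per-replica prediction outputs. With 2 model outputs
# and 2 replicas the list is laid out output-major, so output i owns the slice
# [i * num_replicas : (i + 1) * num_replicas]. The strings are placeholders.
def _example_per_replica_predict_layout(num_replicas=2, num_outputs=2):
  batch_outs = ['out0_r0', 'out0_r1', 'out1_r0', 'out1_r1']
  grouped = [batch_outs[i * num_replicas:(i + 1) * num_replicas]
             for i in range(num_outputs)]
  assert grouped == [['out0_r0', 'out0_r1'], ['out1_r0', 'out1_r1']]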
def _reset_metrics(model):
if model._distribution_strategy:
for mode in [ModeKeys.TRAIN, ModeKeys.TEST, ModeKeys.PREDICT]:
distributed_model = get_distributed_model(model, mode)
if distributed_model:
first_model = model._distribution_strategy.unwrap(distributed_model)[0]
first_model.reset_metrics()
def get_distributed_model(model, mode):
key = _generate_cache_key(mode)
return model._distributed_model_cache.get(key, None)
def set_distributed_model(model, mode, distributed_model):
key = _generate_cache_key(mode)
model._distributed_model_cache[key] = distributed_model
def get_distributed_function(model, mode):
key = _generate_cache_key(mode)
return model._distributed_function_cache.get(key, None)
def set_distributed_function(model, mode, distributed_function):
key = _generate_cache_key(mode)
model._distributed_function_cache[key] = distributed_function
def _generate_cache_key(mode):
key = hash(mode)
return key
@tf_contextlib.contextmanager
def distributed_scope(strategy, learning_phase):
with strategy.scope(), K.learning_phase_scope(learning_phase):
yield
def is_current_worker_chief():
return dc_context.get_current_worker_context().is_chief
def filter_distributed_callbacks(callbacks_list, model):
"""Filter Callbacks based on the worker context when running multi-worker.
Arguments:
callbacks_list: A list of `Callback` instances.
model: Keras model instance.
Returns:
The list of `Callback` instances that should be run on this worker.
"""
if not model._in_multi_worker_mode():
raise ValueError(
'filter_distributed_callbacks() should only be called when Keras '
'is in multi worker mode.')
callbacks_list = callbacks_list or []
if not [
c for c in callbacks_list if isinstance(c, callbacks.ModelCheckpoint)
]:
# TODO(rchao): Consider providing a ModelCheckpoint here if the user
# fails to (possibly with tempfile directory).
logging.warning('ModelCheckpoint callback is not provided. '
'Workers will need to restart training if any fails.')
if callbacks_list is None or is_current_worker_chief():
return callbacks_list
# Some Callbacks should only run on the chief worker.
return [
callback for callback in callbacks_list if not callback._chief_worker_only
] # pylint: disable=protected-access
def _update_sample_weight_modes(model, mode, sample_weights):
"""Update sample_weight_mode of the distributed model."""
if is_distributing_by_cloning(model):
distributed_model = get_distributed_model(model, mode)
if not distributed_model:
_make_replicated_models_with_cloning(model, mode)
distributed_model = get_distributed_model(model, mode)
distributed_model._recompile_exec_function = any(
[e.sample_weights_mismatch() for e in model._training_endpoints])
if sample_weights:
distributed_models = flatten_per_replica_values(
model._distribution_strategy, distributed_model)
# sample_weights is a tuple of 1 list where the number of elements in the
# list is equal to the number of replicas in sync.
sample_weights = sample_weights[0]
if sample_weights and None not in sample_weights:
for m, sw in zip(distributed_models, sample_weights):
m._update_sample_weight_modes(sample_weights=[sw])
def concat_along_batch_dimension(outputs):
"""Concats prediction outputs along the batch dimension."""
if isinstance(outputs[0], sparse_tensor.SparseTensor):
return sparse_ops.sparse_concat_v2(axis=0, sp_inputs=outputs)
if isinstance(outputs[0], ragged_tensor.RaggedTensor):
return array_ops.concat(outputs, axis=0)
return np.concatenate(outputs)
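# Illustrative check, not part of the original module: for plain numpy outputs
# the helper above reduces to np.concatenate along the batch axis, so two
# (2, 3) replica outputs merge into a single (4, 3) array.
def _example_concat_along_batch_dimension():
  outputs = [np.zeros((2, 3)), np.ones((2, 3))]
  assert concat_along_batch_dimension(outputs).shape == (4, 3)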
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
import behaviouralExperimentDefinition.models
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='behaviourExperimentType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('about', models.CharField(max_length=60, blank=True)),
('public', models.BooleanField(default=False)),
('public_set_date', models.DateTimeField(default=datetime.datetime.now)),
('description', models.TextField(max_length=1000, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('creator', models.ForeignKey(related_name='behaviouralExperiment_own', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='chemicalType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('diffusionCoefficient', models.FloatField(default=0)),
('chemical_name', models.CharField(default=b'cAMP', max_length=60, choices=[(b'Lys', b'Lysine'), (b'BIOTIN', b'Biotin'), (b'cAMP', b'Cyclic adenosine monophosphate'), (b'Na+', b'Sodium ion'), (b'Cl-', b'Chlorine ion'), (b'HM', b'Heavy metals'), (b'Cu', b'Copper'), (b'Cd', b'Cadmium'), (b'SDS', b'Sodium dodecyl sulfate'), (b'QUININE', b'Quinine')])),
('isVolatile', models.BooleanField(default=False)),
('volatilitySpeed', models.FloatField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='chemotaxisExperimentWideType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(max_length=1000, blank=True)),
('chemicalCategory', models.CharField(default=b'CQ1', max_length=60, choices=[(b'SPS', b'Static point source'), (b'CQ1', b'chemicalquadrants1'), (b'CQ2', b'chemicalquadrants2'), (b'CQ4', b'chemicalquadrants4'), (b'OR', b'osmoticring')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='chemotaxisQuadrantsType_1_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('quadrantChemicalConcentration', models.FloatField()),
('quadrantChemical', models.ForeignKey(related_name='access_quadrant_1_1', to='behaviouralExperimentDefinition.chemicalType_model')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='chemotaxisQuadrantsType_2_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('quadrant_1_ChemicalConcentration', models.FloatField()),
('quadrant_2_ChemicalConcentration', models.FloatField()),
('quadrantBarrierChemicalConcentration', models.FloatField()),
('quadrantBarrierChemical', models.ForeignKey(related_name='access_quadrant_2_Barrier', to='behaviouralExperimentDefinition.chemicalType_model')),
('quadrant_1_Chemical', models.ForeignKey(related_name='access_quadrant_2_1', to='behaviouralExperimentDefinition.chemicalType_model')),
('quadrant_2_Chemical', models.ForeignKey(related_name='access_quadrant_2_2', to='behaviouralExperimentDefinition.chemicalType_model')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='chemotaxisQuadrantsType_4_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('quadrant_1_ChemicalConcentration', models.FloatField()),
('quadrant_2_ChemicalConcentration', models.FloatField()),
('quadrant_3_ChemicalConcentration', models.FloatField()),
('quadrant_4_ChemicalConcentration', models.FloatField()),
('quadrantBarrierChemicalConcentration', models.FloatField()),
('quadrantBarrierChemical', models.ForeignKey(related_name='access_quadrant_4_Barrier', to='behaviouralExperimentDefinition.chemicalType_model')),
('quadrant_1_Chemical', models.ForeignKey(related_name='access_quadrant_4_1', to='behaviouralExperimentDefinition.chemicalType_model')),
('quadrant_2_Chemical', models.ForeignKey(related_name='access_quadrant_4_2', to='behaviouralExperimentDefinition.chemicalType_model')),
('quadrant_3_Chemical', models.ForeignKey(related_name='access_quadrant_4_3', to='behaviouralExperimentDefinition.chemicalType_model')),
('quadrant_4_Chemical', models.ForeignKey(related_name='access_quadrant_4_4', to='behaviouralExperimentDefinition.chemicalType_model')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='chemotaxisTimeEventType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(max_length=1000, blank=True)),
('chemotaxisType', models.CharField(default=b'DDT', max_length=60, choices=[(b'DDT', b'Dynamic drop test')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='chemotaxisTimet0tot1Type_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='crowdingType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('wormsDistributionInPlate', models.CharField(max_length=60, blank=True)),
('wormsInPlate', models.PositiveIntegerField(default=1, validators=[django.core.validators.MinValueValidator(1)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CubeType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('depth', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('side1Length', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('side2Length', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CylinderType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('length', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('radius', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='directTouchType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('directTouchInstrument', models.CharField(default=b'EB', max_length=60, choices=[(b'EB', b'Eyebrow'), (b'VFH', b'Von Frey hair'), (b'PW', b'Platinium wire')])),
('touchDistance', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(1.0)])),
('touchAngle', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(6.28318)])),
('appliedForce', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='dynamicDropTestType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('dropQuantity', models.FloatField()),
('chemicalConcentration', models.FloatField()),
('xCoordFromPlateCentre', models.FloatField()),
('yCoordFromPlateCentre', models.FloatField()),
('chemical', models.ForeignKey(to='behaviouralExperimentDefinition.chemicalType_model')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='electricShockType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('amplitude', models.FloatField()),
('shockDuration', models.PositiveIntegerField()),
('shockFrequency', models.FloatField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='environmentType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(max_length=1000, blank=True)),
('crowding', models.ForeignKey(to='behaviouralExperimentDefinition.crowdingType_model')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='experimentType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(max_length=1000, blank=True)),
('experimentDuration', models.PositiveIntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='experimentWideConfType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(default=b'No description provided', max_length=1000, blank=True)),
('experimentCategory', models.CharField(default=b'MS', max_length=60, choices=[(b'MS', b'mechanosensation'), (b'CT', b'chemotaxis'), (b'TT', b'termotaxis'), (b'GT', b'galvanotaxis'), (b'PT', b'phototaxis')])),
('chemotaxis', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.chemotaxisExperimentWideType_model', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='galvanotaxisExperimentWideType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='galvanotaxisTimeEventType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='galvanotaxisTimet0tot1Type_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(default=b'', max_length=1000, blank=True)),
('galvanotaxisType', models.CharField(default=b'ES', max_length=60, choices=[(b'ES', b'Electric shocks')])),
('electricShockConf', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.electricShockType_model', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='HexagonType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('depth', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('sideLength', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='interactionAtSpecificTimeType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(default=b'No description provided', max_length=1000, blank=True)),
('eventTime', models.FloatField()),
('experimentCategory', models.CharField(default=b'MS', max_length=60, choices=[(b'MS', b'mechanosensation'), (b'CT', b'chemotaxis'), (b'TT', b'termotaxis'), (b'GT', b'galvanotaxis'), (b'PT', b'phototaxis')])),
('chemotaxis', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.chemotaxisTimeEventType_model', null=True)),
('galvanotaxis', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.galvanotaxisTimeEventType_model', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='interactionFromt0tot1Type_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(default=b'No description provided', max_length=1000, blank=True)),
('eventStartTime', models.FloatField()),
('eventStopTime', models.FloatField()),
('experimentCategory', models.CharField(default=b'MS', max_length=60, choices=[(b'MS', b'mechanosensation'), (b'CT', b'chemotaxis'), (b'TT', b'termotaxis'), (b'GT', b'galvanotaxis'), (b'PT', b'phototaxis')])),
('chemotaxis', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.chemotaxisTimet0tot1Type_model', null=True)),
('galvanotaxis', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.galvanotaxisTimet0tot1Type_model', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='linearThermalGradientType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('temperatureRightHorizonal', models.FloatField()),
('temperatureLeftHorizontal', models.FloatField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='mechanosensationExpWideType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='mechanosensationTimeEventType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(max_length=1000, blank=True)),
('interactionType', models.CharField(default=b'DWT', max_length=60, choices=[(b'PT', b'plateTap'), (b'DWT', b'directWormTouch')])),
('directTouch', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.directTouchType_model', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='mechanosensationTimet0tot1Type_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='obstacleLocationType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('xCoordFromPlateCentre', models.FloatField()),
('yCoorDFromPlateCentre', models.FloatField()),
('Stiffness', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('shape', models.CharField(default=b'CY', max_length=60, choices=[(b'CY', b'cylinder'), (b'CU', b'cube'), (b'HE', b'hexagon')])),
('Cube', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.CubeType_model', null=True)),
('Cylinder', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.CylinderType_model', null=True)),
('Hexagon', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.HexagonType_model', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='osmoticRingType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('chemicalConcentration', models.FloatField()),
('internalRadius', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('externalRadius', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('ringChemical', models.ForeignKey(to='behaviouralExperimentDefinition.chemicalType_model')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='phototaxisExperimentWideType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='phototaxisTimeEventType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='phototaxisTimet0tot1Type_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(max_length=1000, blank=True)),
('phototaxisType', models.CharField(default=b'PSL', max_length=60, choices=[(b'PSL', b'pointsourcelight')])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='plateConfigurationType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('lid', models.BooleanField(default=False)),
('bottomMaterial', models.CharField(default=b'A', max_length=60, choices=[(b'W', b'water'), (b'G', b'gelatin'), (b'A', b'agar')])),
('dryness', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('shape', models.CharField(default=b'CY', max_length=60, choices=[(b'CY', b'cylinder'), (b'CU', b'cube'), (b'HE', b'hexagon')])),
('Cube', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.CubeType_model', null=True)),
('Cylinder', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.CylinderType_model', null=True)),
('Hexagon', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.HexagonType_model', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='plateTapType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('appliedForce', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='pointSourceHeatAvoidanceType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('temperature', models.FloatField()),
('heatPointDistance', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(1)])),
('heatPointAngle', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(6.28318)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='pointSourceLightType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('waveLength', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(255)])),
('intensity', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(255)])),
('lightingPointDistance', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(1)])),
('lightBeamRadius', models.FloatField(default=0.1, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(1)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='shareBehaviouralExperiment',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('shared_date', models.DateTimeField(auto_now_add=True)),
('behaviouralExperiment', models.ForeignKey(to='behaviouralExperimentDefinition.behaviourExperimentType_model')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='staticPointSourceType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('dropQuantity', models.FloatField()),
('chemicalConcentration', models.FloatField()),
('xCoordFromPlateCentre', models.FloatField()),
('yCoordFromPlateCentre', models.FloatField()),
('chemical', models.ForeignKey(to='behaviouralExperimentDefinition.chemicalType_model')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='temperatureChangeInTimeType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('initialTemperature', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('finalTemperature', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='termotaxisExperimentWideType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(max_length=1000, blank=True)),
('termotaxisType', models.CharField(default=b'LT', max_length=60, choices=[(b'LT', b'linearThermalGradient')])),
('linearThermalGradient', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.linearThermalGradientType_model', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='termotaxisTimeEventType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='termotaxisTimet0tot1Type_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('description', models.TextField(max_length=1000, blank=True)),
('termotaxisType', models.CharField(default=b'TC', max_length=60, choices=[(b'TC', b'temperatureChangeInTime'), (b'PS', b'pointsourceheatavoidance')])),
('pointSourceHeatAvoidance', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.pointSourceHeatAvoidanceType_model', null=True)),
('temperatureChangeInTime', models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.temperatureChangeInTimeType_model', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='wormDataType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('gender', models.CharField(default=b'FH', max_length=60, choices=[(b'M', b'Male'), (b'FH', b'Female Hermaphrodites')])),
('age', models.PositiveIntegerField()),
('stageOfLifeCycle', models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4)])),
('timeOffFood', models.PositiveIntegerField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='wormStatusType_model',
fields=[
('uuid', models.CharField(default=behaviouralExperimentDefinition.models.generate_new_uuid, max_length=36, serialize=False, verbose_name=b'Unique Identifier', primary_key=True)),
('xCoordFromPlateCentre', models.FloatField()),
('yCoorDFromPlateCentre', models.FloatField()),
('angleRelativeXaxis', models.FloatField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(6.28318)])),
('wormData', models.ForeignKey(to='behaviouralExperimentDefinition.wormDataType_model')),
],
options={
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='sharebehaviouralexperiment',
unique_together=set([('user', 'behaviouralExperiment')]),
),
migrations.AddField(
model_name='phototaxistimet0tot1type_model',
name='pointSourceLightConf',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.pointSourceLightType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='mechanosensationtimeeventtype_model',
name='plateTap',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.plateTapType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='interactionfromt0tot1type_model',
name='mechanosensation',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.mechanosensationTimet0tot1Type_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='interactionfromt0tot1type_model',
name='phototaxis',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.phototaxisTimet0tot1Type_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='interactionfromt0tot1type_model',
name='termotaxis',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.termotaxisTimet0tot1Type_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='interactionatspecifictimetype_model',
name='mechanosensation',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.mechanosensationTimeEventType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='interactionatspecifictimetype_model',
name='phototaxis',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.phototaxisTimeEventType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='interactionatspecifictimetype_model',
name='termotaxis',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.termotaxisTimeEventType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='experimentwideconftype_model',
name='galvanotaxis',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.galvanotaxisExperimentWideType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='experimentwideconftype_model',
name='mechanosensation',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.mechanosensationExpWideType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='experimentwideconftype_model',
name='phototaxis',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.phototaxisExperimentWideType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='experimentwideconftype_model',
name='termotaxis',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.termotaxisExperimentWideType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='experimenttype_model',
name='experimentWideConf',
field=models.ManyToManyField(to='behaviouralExperimentDefinition.experimentWideConfType_model', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='experimenttype_model',
name='interactionAtSpecificTime',
field=models.ManyToManyField(to='behaviouralExperimentDefinition.interactionAtSpecificTimeType_model', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='experimenttype_model',
name='interactionFromt0tot1',
field=models.ManyToManyField(to='behaviouralExperimentDefinition.interactionFromt0tot1Type_model', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='environmenttype_model',
name='obstacle',
field=models.ManyToManyField(to='behaviouralExperimentDefinition.obstacleLocationType_model', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='environmenttype_model',
name='plateConfiguration',
field=models.ForeignKey(to='behaviouralExperimentDefinition.plateConfigurationType_model'),
preserve_default=True,
),
migrations.AddField(
model_name='environmenttype_model',
name='wormStatus',
field=models.ForeignKey(to='behaviouralExperimentDefinition.wormStatusType_model'),
preserve_default=True,
),
migrations.AddField(
model_name='chemotaxistimeeventtype_model',
name='dynamicDropTestConf',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.dynamicDropTestType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='chemotaxisexperimentwidetype_model',
name='chemotaxisQuadrants1',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.chemotaxisQuadrantsType_1_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='chemotaxisexperimentwidetype_model',
name='chemotaxisQuadrants2',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.chemotaxisQuadrantsType_2_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='chemotaxisexperimentwidetype_model',
name='chemotaxisQuadrants4',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.chemotaxisQuadrantsType_4_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='chemotaxisexperimentwidetype_model',
name='osmoticRing',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.osmoticRingType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='chemotaxisexperimentwidetype_model',
name='staticPointSourceConf',
field=models.ForeignKey(blank=True, to='behaviouralExperimentDefinition.staticPointSourceType_model', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='behaviourexperimenttype_model',
name='environmentDefinition',
field=models.ForeignKey(to='behaviouralExperimentDefinition.environmentType_model'),
preserve_default=True,
),
migrations.AddField(
model_name='behaviourexperimenttype_model',
name='experimentDefinition',
field=models.ForeignKey(to='behaviouralExperimentDefinition.experimentType_model'),
preserve_default=True,
),
migrations.AddField(
model_name='behaviourexperimenttype_model',
name='users_with_access',
field=models.ManyToManyField(related_name='behaviouralExperiment_accessable', through='behaviouralExperimentDefinition.shareBehaviouralExperiment', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
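# Illustrative note (added; not part of the generated migration): assuming the
# app is installed under the label 'behaviouralExperimentDefinition', this
# schema would typically be applied with
#     python manage.py migrate behaviouralExperimentDefinition
# which creates the models above and then wires up the deferred foreign keys
# added by the AddField operations.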
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 16:02:10 2018
@author: BallBlueMeercat
"""
import matplotlib.pyplot as plt
from emcee import EnsembleSampler
import numpy as np
import time
import os.path
import datasim
import tools
import ln
import plots
def stats(test_params, data_dict, sigma, nsteps,
save_path, firstderivs_key):
"""
Takes in:
test_params = dictionary of parameters to be emcee fitted
'm':int/float = e_m(t)/ec(t0) at t=t0;
'gamma':int/float = interaction term;
'zeta':int/float = interaction term;
'alpha':int/float = SN peak mag correlation parameter;
'beta' :int/float = SN peak mag correlation parameter;
data_dict = dictionary of parameters from data
'colour': numpy.ndarray = SN colour;
'x1': numpy.ndarray = SN stretch correction;
'zpicks': list of redshifts sorted in ascending order;
'mag':list of apparent magnitudes;
sigma = standard deviation of error on the data;
nsteps = int, steps to be taken by each emcee walker;
save_path = string, directory for saving output;
firstderivs_key = string, name of IVCDM model to use for model mag.
Returns:
    propert = dictionary of best-fit values, standard deviations and means
        of the sampled parameters, plus the flattened post-burnin trace;
    sampler = the emcee EnsembleSampler used for the run.
"""
# print('-stats has been called')
zpicks = data_dict.get('zpicks',0)
mag = data_dict.get('mag',0)
if firstderivs_key == 'exotic':
pass
elif firstderivs_key == 'LCDM':
test_params['gamma'] = 0
del test_params['gamma']
test_params['zeta'] = 0
del test_params['zeta']
else:
test_params['zeta'] = 0
del test_params['zeta']
# emcee parameters:
ndim = len(test_params)
nwalkers = int(ndim * 2)
# Initializing walkers.
poslist = list(test_params.values())
pos = []
for i in poslist:
pos.append(i)
startpos = np.array(pos)
pos = [startpos + 0.001*np.random.randn(ndim) for i in range(nwalkers)]
# Are walkers starting outside of prior?
i = 0
while i < nwalkers:
theta = pos[i]
lp = ln.lnprior(theta, firstderivs_key)
if not np.isfinite(lp):
print('~~~~~~~pos[%s] (outside of prior) = %s ~~~~~~~'%(i, theta))
i += 1
# Sampler setup.
times0 = time.time() # starting sampler timer
sampler = EnsembleSampler(nwalkers, ndim, ln.lnprob,
args=(data_dict, sigma, firstderivs_key, ndim))
# Burnin.
burnin = int(nsteps/4) # steps to discard
print('_____ burnin start')
timeb0 = time.time() # starting burnin timer
pos, prob, state = sampler.run_mcmc(pos, burnin)
timeb1=time.time() # stopping burnin timer
print('_____ burnin end')
sampler.reset()
# Starting sampler after burnin.
print('_____ sampler start')
sampler.run_mcmc(pos, nsteps)
print('_____ sampler end')
times1=time.time() # stopping sampler timer
# Walker steps.
lnprob = sampler.flatlnprobability
# Index of best parameters found by emcee.
bi = np.argmax(sampler.flatlnprobability) # index with highest post prob
trace = sampler.chain[:, burnin:, :].reshape(-1, ndim)
# Extracting results:
thetabest = np.zeros(ndim)
parambest = {}
true = []
propert = {}
propert['trace'] = trace
colours = ['coral', 'orchid', 'apple', 'orange', 'aquamarine', 'black']
def stat(i, sampler, string, test_params, propert):
best_output = sampler.flatchain[bi,i]
# Input m = e_m(z)/ec(z=0).
param_true = test_params.get(string, 0)
true.append(param_true)
# Output m.
output = sampler.flatchain[:,i]
# Standard deviation and mean of the m distribution.
propert[string+'_sd'] = np.std(output)
propert[string+'_mean'] = np.mean(output)
propert[string] = sampler.flatchain[bi,i]
return best_output, output, param_true, propert
for i in range(ndim):
if i == 0:
best, output, param_true, propert = stat(i, sampler, 'm', test_params, propert)
plots.stat(colours[i], output, param_true, 'matter', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['m'] = best
elif i == 1:
best, output, param_true, propert = stat(i, sampler, 'M', test_params, propert)
plots.stat(colours[i], output, param_true, 'Mcorr', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['M'] = best
elif i == 2:
best, output, param_true, propert = stat(i, sampler, 'a', test_params, propert)
plots.stat(colours[i], output, param_true, 'alpha', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['alpha'] = best
elif i == 3:
best, output, param_true, propert = stat(i, sampler, 'b', test_params, propert)
plots.stat(colours[i], output, param_true, 'beta', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['beta'] = best
elif i == 4:
best, output, param_true, propert = stat(i, sampler, 'g', test_params, propert)
plots.stat(colours[i], output, param_true, 'gamma', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['gamma'] = best
elif i == 5:
best, output, param_true, propert = stat(i, sampler, 'z', test_params, propert)
plots.stat(colours[i], output, param_true, 'zeta', lnprob, zpicks,
mag, sigma, nsteps, nwalkers, save_path, firstderivs_key)
thetabest[i] = best
parambest['zeta'] = best
# Checking if best found parameters are within prior.
lp = ln.lnprior(thetabest, firstderivs_key)
if not np.isfinite(lp):
print('')
print('best emcee parameters outside of prior (magbest calculation)')
print('')
# Plot of data mag and redshifts, overlayed with
# mag simulated using emcee best parameters and data redshifts.
magbest = datasim.magn(parambest, data_dict, firstderivs_key)
plt.figure()
plt.title('model: '+firstderivs_key
+'\n Evolution of magnitude with redshift \n nsteps: '
+str(nsteps)+', noise: '+str(sigma)+', npoints: '+str(len(zpicks)))
data = plt.errorbar(zpicks, mag, yerr=sigma, fmt='.', alpha=0.3)
best_fit = plt.scatter(zpicks, magbest, lw='1', c='xkcd:tomato')
plt.ylabel('magnitude')
plt.xlabel('z')
plt.legend([data, best_fit], ['LCDM', firstderivs_key])
stamp = str(int(time.time()))
filename = str(stamp)+'____magz__nsteps_'+str(nsteps)+'_nwalkers_' \
+str(nwalkers)+'_noise_'+str(sigma)+'_numpoints_'+str(len(zpicks))+'.png'
filename = os.path.join(save_path, filename)
plt.savefig(filename)
plt.show(block=False)
# Corner plot (walkers' walk + histogram).
import corner
# samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
samples = sampler.chain[:, :, :].reshape((-1, ndim))
corner.corner(samples, labels=["$m$", "$M$", "$alpha$", "$beta$", "$g$", "$z$"],
truths=true)
# Results getting printed:
if bi == 0:
print('@@@@@@@@@@@@@@@@@')
print('best index =',str(bi))
print('@@@@@@@@@@@@@@@@@')
print('best parameters =',str(parambest))
print('m.a.f.:', np.mean(sampler.acceptance_fraction))
print('nsteps:', str(nsteps))
print('sigma:', str(sigma))
print('npoints:', str(len(zpicks)))
print('model:', firstderivs_key)
tools.timer('burnin', timeb0, timeb1)
tools.timer('sampler', times0, times1)
return propert, sampler
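# Hedged sketch (added for illustration, not part of the original script): the
# walker initialisation above scatters nwalkers starting positions in a small
# Gaussian ball around the test parameters, e.g. with made-up numbers
#     start = np.array([0.3, -19.3])                       # m, M
#     pos = [start + 1e-3 * np.random.randn(2) for _ in range(4)]
# giving four slightly perturbed copies of `start`, which is the shape of
# initial state that EnsembleSampler.run_mcmc expects.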
|
|
import sys
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class Terminal(object):
terminfo = None
columns = 80
lines = 25
isatty = True
isCygwin = False
# output colors
C_BLACK = 0 # whatever basic terminal color is set at?
C_RED = 1
C_GREEN = 2
C_YELLOW = 3
C_BLUE = 4
C_MAGENTA = 5
C_CYAN = 6
C_WHITE = 7
# Offset subtracted from the full terminal width (terminal.getColumns())
# to keep output from running right up against the edge of the screen
TERMINAL_WIDTH_OFFSET = 2
def __init__(self, options=None):
    # Avoid a shared mutable default argument; treat None as "no options".
    if options is None:
        options = {}
if 'terminfo' in options:
self.terminfo = options['terminfo']
else:
from .terminfo import Terminfo
self.terminfo = Terminfo()
# Set whether output is a tty
self.isatty = is_a_tty(sys.stdout)
# Set the columns and lines
(self.columns, self.lines) = self._getSize()
def is_tty(self):
    # The boolean set in __init__ shadows a method of the same name, so the
    # flag is exposed through this differently named accessor instead.
    return self.isatty
def printterm(self, text):
if self.isatty:
sys.stdout.write(text)
def clear(self):
self.printterm(self.terminfo.clear())
return self
def locate(self, row, col):
self.printterm(self.terminfo.cup(row, col))
return self
def boldType(self):
self.printterm(self.terminfo.bold())
return self
def setFgColor(self, num):
self.printterm(self.terminfo.setaf(num))
return self
def setBgColor(self, num):
self.printterm(self.terminfo.setab(num))
return self
def prompt(self, text):
    try:
        return raw_input(text)  # Python 2
    except NameError:
        return input(text)  # Python 3
def getColumns(self):
(self.columns, self.lines) = self._getSize()
return self.columns
def getLines(self):
(self.columns, self.lines) = self._getSize()
return self.lines
def centerText(self, text):
# TODO: implement
return text
def prettyMessage(self, text, fg=7, bg=4, size_=None, verticalPadding=True):
if size_ is None:
size_ = self.columns
length = len(text) + 4
start = self.setaf(fg) + self.setab(bg)
end = self.op() + "\n"
newline = end + start
if length > size_ or "\n" in text:
length = size_
text = self.wordwrap(text, size_-4)
lines = text.split("\n")
text = ''
for line in lines:
line = " " + line.strip()
text = text + line.ljust(size_) + newline
else:
text = ' ' + text + ' ' + newline
if verticalPadding == True:
padding = ' ' * length
else:
padding = ''
end = end.strip()
newline = end + start
out = start \
+ padding + newline \
+ text \
+ padding \
+ end
print(out)
return self
def makeBox(self, y, x, w, h):
# TODO: implement
return ''
def startAltCharsetMode(self):
self.printterm(chr(27) + chr(40) + chr(48))
def endAltCharsetMode(self):
self.printterm(chr(27) + chr(40) + chr(66))
def __getattr__(self, attr):
def default_method(*args):
if not self.isatty:
return ''
return self.terminfo.doCapability(attr, *args)
return default_method
def wordwrap(self, string, width=80, ind1=0, ind2=0, prefix=''):
""" word wrapping function.
string: the string to wrap
width: the column number to wrap at
prefix: prefix each line with this string (goes before any indentation)
ind1: number of characters to indent the first line
ind2: number of characters to indent the rest of the lines
"""
string = prefix + ind1 * " " + string
newstring = ""
while len(string) > width:
# find position of nearest whitespace char to the left of "width"
marker = width - 1
while not string[marker].isspace():
marker = marker - 1
# remove line from original string and add it to the new string
newline = string[0:marker] + "\n"
newstring = newstring + newline
string = prefix + ind2 * " " + string[marker + 1:]
return newstring + string
def wordwrapDescription(self, string, width=80, indent=0):
""" word wrapping function that uses all of the available space
for the last column
string: the string to wrap
width: the column number to wrap at
indent: number of characters to indent the second line on
"""
if width < 35:
# if we don't have that much room for the description column, we'll
# skip the fancy wrapping
return string
newstring = ""
while len(string) > width:
# find position of nearest whitespace char to the left of "width"
marker = width - 1
while not string[marker].isspace():
marker = marker - 1
if marker < 0:
# couldn't find any spaces so we'll skip the wrapping on
# this one
return string
# remove line from original string and add it to the new string
newline = string[0:marker] + '\n'
if newstring != '':
newstring += indent * ' '
newstring += newline
string = string[marker + 1:]
if newstring != '':
newstring += indent * ' '
return newstring + string
def _getSize(self):
    import os

    def ioctl_GWINSZ(fd):
        try:
            import fcntl, termios, struct
            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except Exception:
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:
            pass
    if not cr:
        try:
            # Fall back to the environment before the hard-coded default.
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except (KeyError, ValueError):
            cr = (25, 80)
    return int(cr[1]), int(cr[0])
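# Hedged usage sketch (added for illustration; assumes a Unix-like system and
# does not touch the sibling `terminfo` module). It queries the window size
# with the same TIOCGWINSZ ioctl that Terminal._getSize uses:
if __name__ == '__main__':
    import fcntl, struct, termios
    try:
        _rows, _cols = struct.unpack(
            'hh', fcntl.ioctl(0, termios.TIOCGWINSZ, '1234'))
        print('terminal size: %d columns x %d lines' % (_cols, _rows))
    except (IOError, OSError):
        # stdin is not a terminal; fall back to the same default as _getSize.
        print('terminal size: 80 columns x 25 lines')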
|
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
from cntk import output_variable, FreeDimension
from cntk.ops.functions import UserFunction
import yaml
import numpy as np
import numpy.random as npr
from utils.rpn.bbox_transform import bbox_transform
from utils.cython_modules.cython_bbox import bbox_overlaps
try:
from config import cfg
except ImportError:
from utils.default_config import cfg
DEBUG = False
class ProposalTargetLayer(UserFunction):
'''
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
'''
def __init__(self, arg1, arg2, name='ProposalTargetLayer', param_str=None, deterministic=False):
super(ProposalTargetLayer, self).__init__([arg1, arg2], name=name)
self.param_str_ = param_str if param_str is not None else "'num_classes': 2"
# parse the layer parameter string, which must be valid YAML
layer_params = yaml.safe_load(self.param_str_)  # safe_load also works on PyYAML >= 5, which requires an explicit Loader for plain load()
self._num_classes = layer_params['num_classes']
self._deterministic_mode = deterministic
self._count = 0
self._fg_num = 0
self._bg_num = 0
def infer_outputs(self):
# sampled rois (0, x1, y1, x2, y2)
# for CNTK the proposal shape is [4 x roisPerImage], and mirrored in Python
rois_shape = (FreeDimension, 4)
labels_shape = (FreeDimension, self._num_classes)
bbox_targets_shape = (FreeDimension, self._num_classes * 4)
bbox_inside_weights_shape = (FreeDimension, self._num_classes * 4)
return [output_variable(rois_shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes,
name="rpn_target_rois_raw", needs_gradient=False),
output_variable(labels_shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes,
name="label_targets_raw", needs_gradient=False),
output_variable(bbox_targets_shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes,
name="bbox_targets_raw", needs_gradient=False),
output_variable(bbox_inside_weights_shape, self.inputs[0].dtype, self.inputs[0].dynamic_axes,
name="bbox_inside_w_raw", needs_gradient=False)]
def forward(self, arguments, outputs, device=None, outputs_to_retain=None):
bottom = arguments
# Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
# (i.e., rpn.proposal_layer.ProposalLayer), or any other source
all_rois = bottom[0][0,:]
# remove zero padded proposals
keep0 = np.where(
((all_rois[:, 2] - all_rois[:, 0]) > 0) &
((all_rois[:, 3] - all_rois[:, 1]) > 0)
)
all_rois = all_rois[keep0]
# GT boxes (x1, y1, x2, y2, label)
# TODO(rbg): it's annoying that sometimes I have extra info before
# and other times after box coordinates -- normalize to one format
gt_boxes = bottom[1][0,:]
# remove zero padded ground truth boxes
keep1 = np.where(
((gt_boxes[:,2] - gt_boxes[:,0]) > 0) &
((gt_boxes[:,3] - gt_boxes[:,1]) > 0)
)
gt_boxes = gt_boxes[keep1]
assert gt_boxes.shape[0] > 0, \
"No ground truth boxes provided"
# Include ground-truth boxes in the set of candidate rois
# for CNTK: add batch index axis with all zeros to both inputs
all_rois = np.vstack((all_rois, gt_boxes[:, :-1]))
zeros = np.zeros((all_rois.shape[0], 1), dtype=all_rois.dtype)
all_rois = np.hstack((zeros, all_rois))
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), \
'Only single item batches are supported'
rois_per_image = cfg.TRAIN.BATCH_SIZE
fg_rois_per_image = np.round(cfg["TRAIN"].FG_FRACTION * rois_per_image).astype(int)
# Sample rois with classification labels and bounding box regression
# targets
labels, rois, bbox_targets, bbox_inside_weights = _sample_rois(
all_rois, gt_boxes, fg_rois_per_image,
rois_per_image, self._num_classes,
deterministic=self._deterministic_mode)
if DEBUG:
print ('num rois: {}'.format(rois_per_image))
print ('num fg: {}'.format((labels > 0).sum()))
print ('num bg: {}'.format((labels == 0).sum()))
self._count += 1
self._fg_num += (labels > 0).sum()
self._bg_num += (labels == 0).sum()
print ('num fg avg: {}'.format(self._fg_num / self._count))
print ('num bg avg: {}'.format(self._bg_num / self._count))
print ('ratio: {:.3f}'.format(float(self._fg_num) / float(self._bg_num)))
# pad with zeros if too few rois were found
num_found_rois = rois.shape[0]
if num_found_rois < rois_per_image:
rois_padded = np.zeros((rois_per_image, rois.shape[1]), dtype=np.float32)
rois_padded[:num_found_rois, :] = rois
rois = rois_padded
labels_padded = np.zeros((rois_per_image), dtype=np.float32)
labels_padded[:num_found_rois] = labels
labels = labels_padded
bbox_targets_padded = np.zeros((rois_per_image, bbox_targets.shape[1]), dtype=np.float32)
bbox_targets_padded[:num_found_rois, :] = bbox_targets
bbox_targets = bbox_targets_padded
bbox_inside_weights_padded = np.zeros((rois_per_image, bbox_inside_weights.shape[1]), dtype=np.float32)
bbox_inside_weights_padded[:num_found_rois, :] = bbox_inside_weights
bbox_inside_weights = bbox_inside_weights_padded
# for CNTK: get rid of batch ind zeros and add batch axis
rois = rois[:,1:]
# sampled rois
rois.shape = (1,) + rois.shape
outputs[self.outputs[0]] = np.ascontiguousarray(rois)
# classification labels
labels_as_int = [i.item() for i in labels.astype(int)]
labels_dense = np.eye(self._num_classes, dtype=np.float32)[labels_as_int]
labels_dense.shape = (1,) + labels_dense.shape # batch axis
outputs[self.outputs[1]] = labels_dense
# bbox_targets
bbox_targets.shape = (1,) + bbox_targets.shape # batch axis
outputs[self.outputs[2]] = np.ascontiguousarray(bbox_targets)
# bbox_inside_weights
bbox_inside_weights.shape = (1,) + bbox_inside_weights.shape # batch axis
outputs[self.outputs[3]] = np.ascontiguousarray(bbox_inside_weights)
def backward(self, state, root_gradients, variables):
"""This layer does not propagate gradients."""
pass
def clone(self, cloned_inputs):
return ProposalTargetLayer(cloned_inputs[0], cloned_inputs[1], param_str=self.param_str_)
def serialize(self):
internal_state = {}
internal_state['param_str'] = self.param_str_
return internal_state
@staticmethod
def deserialize(inputs, name, state):
param_str = state['param_str']
return ProposalTargetLayer(inputs[0], inputs[1], name=name, param_str=param_str)
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, tx, ty, tw, th)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0].astype(int)
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = [1.0, 1.0, 1.0, 1.0]
return bbox_targets, bbox_inside_weights
def _compute_targets(ex_rois, gt_rois, labels):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 4
targets = bbox_transform(ex_rois, gt_rois)
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))
/ np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
return np.hstack((labels[:, np.newaxis], targets)).astype(np.float32, copy=False)
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes, deterministic=False):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=float),   # builtin float; np.float was removed in NumPy >= 1.24
np.ascontiguousarray(gt_boxes[:, :4], dtype=float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= cfg["TRAIN"].FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
if deterministic:
fg_inds = fg_inds[:fg_rois_per_this_image]
else:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((max_overlaps < cfg["TRAIN"].BG_THRESH_HI) &
(max_overlaps >= cfg["TRAIN"].BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
if deterministic:
bg_inds = bg_inds[:bg_rois_per_this_image]
else:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
rois = all_rois[keep_inds]
bbox_target_data = _compute_targets(
rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)
bbox_targets, bbox_inside_weights = \
_get_bbox_regression_labels(bbox_target_data, num_classes)
return labels, rois, bbox_targets, bbox_inside_weights
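# Hedged sketch (added for illustration, not part of the original layer): a tiny
# synthetic example of the 4-of-4*K expansion done by
# _get_bbox_regression_labels; the values and num_classes=3 are made up.
if __name__ == '__main__':
    _demo_targets = np.array([[1, 0.1, 0.2, 0.3, 0.4],    # class 1
                              [0, 0.0, 0.0, 0.0, 0.0]],   # background row
                             dtype=np.float32)
    _bt, _biw = _get_bbox_regression_labels(_demo_targets, 3)
    # Row 0 carries its four targets in columns 4..7 (the class-1 slot);
    # the background row and all other class slots stay zero.
    print(_bt)
    print(_biw)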
|
|
__author__ = "Mohammadjavad valipoor"
__copyright__ = "Copyright 2015, SoftTelecom"
import json
import logging
from Queue import Queue
import random
import string
import datetime
def config_logger(log_level=logging.DEBUG):
logging.basicConfig(format='%(levelname)s %(asctime)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=log_level)
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
hdlr = logging.FileHandler('presentation_api_log.txt')
formatter = logging.Formatter(fmt='%(levelname)s %(asctime)s: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
return logger
LOG = config_logger()
class JSONManager:
def __init__(self):
pass
def read(self,myjson):
try:
decoded = json.loads(myjson)
except (ValueError, KeyError, TypeError):
return -1
return decoded
def jprint(self,myjson):
try:
decoded = json.loads(myjson)
# pretty printing of json-formatted string
print json.dumps(decoded, sort_keys=True, indent=4)
#print "JSON parsing example: ", decoded['one'] print "Complex JSON parsing example: ", decoded['two']['list'][1]['item']
except (ValueError, KeyError, TypeError):
print "JSON format error"
class Message(object):
def __init__(self, from_whom, to_whom, task, date_time):
self.from_whom = from_whom
self.to_whom = to_whom
self.task = task
self.date_time = date_time
def get_msg(self):
return self.to_whom + "_from_" + self.from_whom
def get_json(self):
return json.dumps({"from": self.from_whom, "to": self.to_whom, "task": self.task, "date_time": str(self.date_time)})
def get_from(self):
return self.from_whom
def get_to(self):
return self.to_whom
def get_task(self):
return self.task
def get_date_time(self):
return self.date_time
def set_from(self, value):
self.from_whom = value
def set_to(self, value):
self.to_whom = value
def set_task(self, value):
self.task = value
def set_date_time(self, value):
self.date_time = str(value)
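# Illustrative note (added; not in the original): a Message serialises to a flat
# JSON object, e.g.
#     Message('so', 'cms1', 'deploy', datetime.datetime.now()).get_json()
# yields {"from": "so", "to": "cms1", "task": "deploy", "date_time": "..."},
# and get_msg() on the same object returns "cms1_from_so"; message_mgnt() below
# pops such messages off the queue one POST at a time.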
class Application:
def __init__(self):
self.ctype = 'text/plain'
self.jsontype = 'application/json'
self.allowed_users = []
self.SERVER_ERROR_PARSE_JSON = 'Error in parsing input json'
self.SERVER_ERROR_SET_CONFIG_JSON = 'Error while setting instance json config file'
self.SERVER_ERROR_DEPLOY_NOT_FINISHED = 'Deployment not finished'
self.SERVER_ERROR_DB_NOT_READY = 'Database instance is not ready yet'
self.SERVER_ERROR_DB_NOT_FOUND = 'Database not found'
self.SERVER_ERROR_CALL_INSTANCE = 'Exception raised while trying to call application'
self.SERVER_ERROR_CALL_PROVISION_SCRIPT = 'Wrong number of parameters for the script'
self.message_queue = Queue()
self.component_status = {
"slaaas":"None",
"aaaaas":"None",
"dnsaas":"None",
"monaas":"None",
"icnaas":"None",
"cms1":"None",
"cms2":"None",
"cms3":"None",
"mcr":"None",
"db":"None",
"lbaas":"None",
"so":"None"
}
def __call__(self, environ, start_response):
self.environ=environ
self.start_response=start_response
if environ['PATH_INFO'] == '/v1.0/test':
return self.test()
elif environ['PATH_INFO'] == '/v1.0/service_ready':
return self.service_ready()
elif environ['PATH_INFO'] == '/v1.0/message':
return self.message_mgnt()
elif environ['PATH_INFO'] == '/v1.0/auth':
return self.auth()
else:
return self.not_found()
def service_ready(self):
if self.environ['REQUEST_METHOD'] == 'GET':
response_body = json.dumps(self.component_status)
self.start_response('200 OK', [('Content-Type', self.jsontype), ('Content-Length', str(len(response_body)))])
return [response_body]
elif self.environ['REQUEST_METHOD'] == 'PUT':
#get JSON from PAYLOAD
from cStringIO import StringIO
length = self.environ.get('CONTENT_LENGTH', '0')
length = 0 if length == '' else int(length)
body = self.environ['wsgi.input'].read(length)
jsonm = JSONManager()
jsonm.jprint(body)
init_json = jsonm.read(body)
if (init_json == -1):
return self.servererror(self.SERVER_ERROR_PARSE_JSON)
#check auth
if not (self.token_exists(init_json["user"]) == init_json["token"]):
return self.unauthorised()
#check user/pass
for item in init_json["components"]:
self.component_status[item["name"]] = "deployed"
LOG.debug("Status of specified components changed to ready.")
response_body = json.dumps({"Message":"Status of specified components changed to ready."})
self.start_response('200 OK', [('Content-Type', self.jsontype), ('Content-Length', str(len(response_body)))])
LOG.debug(str([response_body]))
return [response_body]
elif self.environ['REQUEST_METHOD'] == 'POST':
#get JSON from PAYLOAD
from cStringIO import StringIO
length = self.environ.get('CONTENT_LENGTH', '0')
length = 0 if length == '' else int(length)
body = self.environ['wsgi.input'].read(length)
jsonm = JSONManager()
jsonm.jprint(body)
init_json = jsonm.read(body)
if (init_json == -1):
return self.servererror(self.SERVER_ERROR_PARSE_JSON)
#check auth
if not (self.token_exists(init_json["user"]) == init_json["token"]):
return self.unauthorised()
#check user/pass
for item in init_json["components"]:
self.component_status[item["name"]] = "configured"
LOG.debug("Status of specified components changed to ready.")
response_body = json.dumps({"Message":"Status of specified components changed to ready."})
self.start_response('200 OK', [('Content-Type', self.jsontype), ('Content-Length', str(len(response_body)))])
LOG.debug(str([response_body]))
return [response_body]
elif self.environ['REQUEST_METHOD'] == 'DELETE':
#get JSON from PAYLOAD
from cStringIO import StringIO
length = self.environ.get('CONTENT_LENGTH', '0')
length = 0 if length == '' else int(length)
body = self.environ['wsgi.input'].read(length)
jsonm = JSONManager()
jsonm.jprint(body)
init_json = jsonm.read(body)
if (init_json == -1):
return self.servererror(self.SERVER_ERROR_PARSE_JSON)
#check auth
if not (self.token_exists(init_json["user"]) == init_json["token"]):
return self.unauthorised()
#check user/pass
for item in init_json["components"]:
self.component_status[item["name"]] = "None"
LOG.debug("Status of specified components changed to NOT ready.")
response_body = json.dumps({"Message":"Status of specified components changed to NOT ready."})
self.start_response('200 OK', [('Content-Type', self.jsontype), ('Content-Length', str(len(response_body)))])
LOG.debug(str([response_body]))
return [response_body]
else:
return self.not_found()
def message_mgnt(self):
if self.environ['REQUEST_METHOD'] == 'POST':
#get JSON from PAYLOAD
from cStringIO import StringIO
length = self.environ.get('CONTENT_LENGTH', '0')
length = 0 if length == '' else int(length)
body = self.environ['wsgi.input'].read(length)
jsonm = JSONManager()
jsonm.jprint(body)
init_json = jsonm.read(body)
if (init_json == -1):
return self.servererror(self.SERVER_ERROR_PARSE_JSON)
#check auth
if not (self.token_exists(init_json["user"]) == init_json["token"]):
return self.unauthorised()
#check user/pass
if self.message_queue.qsize() > 0:
tmp_msg = self.message_queue.get()
else:
tmp_msg = Message("dummy", "dummy", "dummy", "dummy")
LOG.debug("Message popped from the queue. Content: " + tmp_msg.get_json())
response_body = tmp_msg.get_json()
self.start_response('200 OK', [('Content-Type', self.jsontype), ('Content-Length', str(len(response_body)))])
LOG.debug(str([response_body]))
return [response_body]
elif self.environ['REQUEST_METHOD'] == 'PUT':
#get JSON from PAYLOAD
from cStringIO import StringIO
length = self.environ.get('CONTENT_LENGTH', '0')
length = 0 if length == '' else int(length)
body = self.environ['wsgi.input'].read(length)
jsonm = JSONManager()
jsonm.jprint(body)
init_json = jsonm.read(body)
if (init_json == -1):
return self.servererror(self.SERVER_ERROR_PARSE_JSON)
#check auth
if not (self.token_exists(init_json["user"]) == init_json["token"]):
return self.unauthorised()
#check user/pass
tmp_msg = Message(init_json["from_whom"], init_json["to_whom"], init_json["task"], datetime.datetime.now())
self.message_queue.put(tmp_msg)
LOG.debug("Message pushed to the queue.")
response_body = json.dumps({"Message":"Message pushed to the queue."})
self.start_response('200 OK', [('Content-Type', self.jsontype), ('Content-Length', str(len(response_body)))])
LOG.debug(str([response_body]))
return [response_body]
else:
return self.not_found()
def auth(self):
if self.environ['REQUEST_METHOD'] == 'POST':
#get JSON from PAYLOAD
from cStringIO import StringIO
length = self.environ.get('CONTENT_LENGTH', '0')
length = 0 if length == '' else int(length)
body = self.environ['wsgi.input'].read(length)
jsonm = JSONManager()
jsonm.jprint(body)
auth_json = jsonm.read(body)
if (auth_json == -1):
return self.servererror(self.SERVER_ERROR_PARSE_JSON)
#check user/pass
username = str(auth_json["user"])
result = self.token_exists(username)
if result is None:
if (username == "SO" and auth_json["password"] == "SO"):
token = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
user_object = {'user': auth_json["user"], 'token': token}
self.allowed_users.append(user_object)
response_body = '{"user":"SO","token":"' + token + '"}'
elif (username == "HTML5" and auth_json["password"] == "HTML5"):
token = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16))
user_object = {'user': auth_json["user"], 'token': token}
self.allowed_users.append(user_object)
response_body = '{"user":"HTML5","token":"' + token + '"}'
else:
return self.unauthorised()
else:
response_body = '{"user":"' + username + '","token":"' + result + '"}'
self.start_response('200 OK', [('Content-Type', self.jsontype), ('Content-Length', str(len(response_body)))])
return [response_body]
#everything went fine
else:
return self.not_found()
def token_exists(self, username):
for item in self.allowed_users:
if item["user"] == username:
return item["token"]
return None
# ////////////////ERROR MGMT////////////////////
def not_found(self):
"""Called if no URL matches."""
self.start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
return ['Not Found']
def unauthorised(self):
"""Called if no URL matches."""
self.start_response('401 UNAUTHORIZED', [('Content-Type', 'text/plain')])
return ['Unauthorised']
def servererror(self, err_description = None):
"""Called if no URL matches."""
self.start_response('500 INTERNAL SERVER ERROR', [('Content-Type', 'text/plain')])
if err_description is None:
err_description = 'Request error'
return [err_description]
application = Application()
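# A minimal, hypothetical client sketch for the endpoints above. It is not part
# of the service itself: the base URL, port and the SO/SO credentials mirror the
# values hard-coded in Application and in the __main__ block below; everything
# else is illustrative and nothing here is called automatically.
def example_get_token(base_url='http://localhost:8055'):
    import urllib2
    payload = json.dumps({'user': 'SO', 'password': 'SO'})
    request = urllib2.Request(base_url + '/v1.0/auth', payload,
                              {'Content-Type': 'application/json'})
    # The response body is a JSON object such as {"user": "SO", "token": "..."}
    return json.loads(urllib2.urlopen(request).read())['token']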
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('', 8055, application)
httpd.serve_forever()
|
|
from collections import defaultdict
from typing import List, Type, Dict, Union, Tuple, Optional, Sequence
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models, IntegrityError
from django.db.models.query import QuerySet
from django.utils.translation import gettext_lazy as _
from etc.toolbox import get_model_class_from_string
from .settings import MODEL_FLAG
from .utils import get_flag_model
if False: # pragma: nocover
from django.contrib.auth.models import User # noqa
USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
TypeFlagsForType = List['FlagBase']
TypeFlagsForTypes = Dict[Type[models.Model], TypeFlagsForType]
class FlagBase(models.Model):
"""Base class for flag models.
Flags are marks on various site entities (model instances).
Inherit from this model and override SITEFLAGS_FLAG_MODEL in settings.py
to customize model fields and behaviour.
"""
note = models.TextField(_('Note'), blank=True)
status = models.IntegerField(_('Status'), null=True, blank=True, db_index=True)
user = models.ForeignKey(
USER_MODEL, related_name='%(class)s_users', verbose_name=_('User'),
on_delete=models.CASCADE)
time_created = models.DateTimeField(_('Date created'), auto_now_add=True)
# Here follows a link to an object.
object_id = models.PositiveIntegerField(verbose_name=_('Object ID'), db_index=True)
content_type = models.ForeignKey(
ContentType, verbose_name=_('Content type'),
related_name='%(app_label)s_%(class)s_flags',
on_delete=models.CASCADE)
linked_object = GenericForeignKey()
class Meta:
abstract = True
verbose_name = _('Flag')
verbose_name_plural = _('Flags')
unique_together = (
'content_type',
'object_id',
'user',
'status',
)
@classmethod
def get_flags_for_types(
cls,
mdl_classes: List[Type[models.Model]],
*,
user: 'User' = None,
status: int = None,
allow_empty: bool = True,
with_objects: bool = False,
) -> TypeFlagsForTypes:
"""Returns a dictionary with flag objects associated with the given model classes (types).
The dictionary is indexed by model classes.
Each dict entry contains a list of associated flags.
:param mdl_classes: Types to get flags for.
:param user: User filter.
:param status: Status filter.
:param allow_empty: Flag. Include results for all given types, even those without associated flags.
:param with_objects: Whether to fetch the flagged objects along with the flags.
"""
if not mdl_classes or (user and not user.id):
return {}
types_for_models = ContentType.objects.get_for_models(*mdl_classes, for_concrete_models=False)
filter_kwargs = {'content_type__in': types_for_models.values()}
update_filter_dict(filter_kwargs, user=user, status=status)
flags = cls.objects.filter(**filter_kwargs)
if with_objects:
flags = flags.prefetch_related('linked_object')
flags = flags.order_by('-time_created')
flags_dict = defaultdict(list)
for flag in flags:
flags_dict[flag.content_type_id].append(flag)
result = {} # Respect initial order.
for mdl_cls in mdl_classes:
content_type_id = types_for_models[mdl_cls].id
if content_type_id in flags_dict:
result[mdl_cls] = flags_dict[content_type_id]
elif allow_empty:
result[mdl_cls] = []
return result
@classmethod
def get_flags_for_objects(
cls,
objects_list: Union[QuerySet, Sequence],
*,
user: 'User' = None,
status: int = None
) -> Dict[int, TypeFlagsForType]:
"""Returns a dictionary with flag objects associated with the given model objects.
The dictionary is indexed by objects IDs.
Each dict entry contains a list of associated flag objects.
:param objects_list:
:param user:
:param status:
"""
if not objects_list or (user and not user.id):
return {}
objects_ids = objects_list
if not isinstance(objects_list, QuerySet):
objects_ids = [obj.pk for obj in objects_list]
filter_kwargs = {
'object_id__in': objects_ids,
# Consider this list homogeneous.
'content_type': ContentType.objects.get_for_model(objects_list[0], for_concrete_model=False)
}
update_filter_dict(filter_kwargs, user=user, status=status)
flags = cls.objects.filter(**filter_kwargs)
flags_dict = defaultdict(list)
for flag in flags:
flags_dict[flag.object_id].append(flag)
result = {}
for obj in objects_list:
result[obj.pk] = flags_dict.get(obj.pk, [])
return result
def __str__(self):
return f'{self.content_type}:{self.object_id} status {self.status}'
class Flag(FlagBase):
"""Built-in flag class. Default functionality."""
class ModelWithFlag(models.Model):
"""Helper base class for models with flags.
Inherit from this model to be able to mark model instances.
"""
flags = GenericRelation(MODEL_FLAG)
class Meta:
abstract = True
@classmethod
def get_flags_for_type(
cls,
mdl_classes: List[Type[models.Model]] = None,
*,
user: 'User' = None,
status: int = None,
allow_empty: bool = True,
with_objects: bool = False,
) -> Union[TypeFlagsForTypes, TypeFlagsForType]:
"""Returns a dictionary with flag objects associated with
the given model classes (types) if mdl_classes is given.
The dictionary is indexed by model classes.
Each dict entry contains a list of associated flag objects.
If mdl_classes is not given, returns a list of associated
flag objects for this very class.
:param mdl_classes: Types to get flags for. If not set the current class is used.
:param user: User filter.
:param status: Status filter.
:param allow_empty: Flag. Include results for all given types, even those without associated flags.
:param with_objects: Whether to fetch the flagged objects along with the flags.
"""
model: FlagBase = get_model_class_from_string(MODEL_FLAG)
single_type = False
if mdl_classes is None:
mdl_classes = [cls]
single_type = True
allow_empty = True
result = model.get_flags_for_types(
mdl_classes,
user=user,
status=status,
allow_empty=allow_empty,
with_objects=with_objects,
)
if single_type:
result = result[cls]
return result
get_flags_for_types = get_flags_for_type # alias
@classmethod
def get_flags_for_objects(
cls,
objects_list: Union[QuerySet, Sequence],
*,
user: 'User' = None,
status: int = None
) -> Dict[int, TypeFlagsForType]:
"""Returns a dictionary with flag objects associated with the given model objects.
The dictionary is indexed by objects IDs.
Each dict entry contains a list of associated flag objects.
:param objects_list:
:param user:
:param status:
"""
model: FlagBase = get_model_class_from_string(MODEL_FLAG)
return model.get_flags_for_objects(objects_list, user=user, status=status)
def get_flags(self, user: 'User' = None, *, status: int = None) -> Union[QuerySet, Sequence[FlagBase]]:
"""Returns flags for the object optionally filtered by status.
:param user: Optional user filter
:param status: Optional status filter
"""
filter_kwargs = {}
update_filter_dict(filter_kwargs, user=user, status=status)
return self.flags.filter(**filter_kwargs).all()
def set_flag(self, user: 'User', *, note: str = None, status: int = None) -> Optional[FlagBase]:
"""Flags the object.
:param user:
:param note: User-defined note for this flag.
:param status: Optional status integer (the meaning is defined by a developer).
"""
if not user.id:
return None
init_kwargs = {
'user': user,
'linked_object': self,
}
if note is not None:
init_kwargs['note'] = note
if status is not None:
init_kwargs['status'] = status
flag = get_flag_model()(**init_kwargs)
try:
flag.save()
except IntegrityError: # Record already exists.
return None
return flag
def remove_flag(self, user: 'User' = None, *, status: int = None):
"""Removes flag(s) from the object.
:param user: Optional user filter
:param status: Optional status filter
"""
filter_kwargs = {
'content_type': ContentType.objects.get_for_model(self),
'object_id': self.id
}
update_filter_dict(filter_kwargs, user=user, status=status)
get_flag_model().objects.filter(**filter_kwargs).delete()
def is_flagged(self, user: 'User' = None, *, status: int = None) -> int:
"""Returns a number of times the object is flagged by a user.
:param user: Optional user filter
:param status: Optional status filter
"""
filter_kwargs = {
'content_type': ContentType.objects.get_for_model(self),
'object_id': self.id,
}
update_filter_dict(filter_kwargs, user=user, status=status)
return self.flags.filter(**filter_kwargs).count()
def update_filter_dict(d: dict, *, user: Optional['User'], status: Optional[int]):
"""Helper. Updates filter dict for a queryset.
:param d:
:param user:
:param status:
"""
if user is not None:
if not user.id:
return None
d['user'] = user
if status is not None:
d['status'] = status
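# A minimal usage sketch for ModelWithFlag. The Article model below is purely
# hypothetical and only illustrates the API defined above; in a real project it
# would live in an application's models.py and `user` would be a saved User:
#
#     class Article(ModelWithFlag):
#         title = models.CharField(max_length=100)
#
#     article = Article.objects.create(title='Example')
#     article.set_flag(user, note='looks like spam', status=1)
#     article.is_flagged(user, status=1)  # -> 1
#     article.remove_flag(user)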
|
|
import pandas as pd
import numpy as np
import chess
import matplotlib.pyplot as plt
import guerilla.train.stockfish_eval as sf
import guerilla.train.chess_game_parser as cgp
import guerilla.data_handler as dh
from guerilla.players import Guerilla
def metric_by_move(weight_files, labels=None, num_values=100, metric='mean', verbose=False):
# Displays accuracy metric by move
# Potential metrics are 'err_mean', 'err_variance', 'mean', 'variance'
# Respectively: mean error (abs(predicted - actual)), variance of error, mean score, variance of score
labels = weight_files if labels is None else labels # default the labels to the weight file names
# Load data
if verbose:
print "Loading data.."
actual = sf.load_stockfish_values(num_values=num_values)
fens = cgp.load_fens(num_values=len(actual))
if verbose:
print "Loaded %d FENs and scores." % len(fens)
# Predict values and get move numbers
for w, weight_file in enumerate(weight_files):
if verbose:
print "Generating predictions..."
move_num = [0] * len(fens) # halfmove
with Guerilla('Harambe', load_file=weight_file) as g:
predicted = get_predictions(g, fens, verbose)
for i, fen in enumerate(fens):
move_num[i] = 2 * (int(dh.strip_fen(fen, 5)) - 1) + (1 if dh.black_is_next(fen) else 0)
# Convert to dataframe for plotting
if verbose:
print "Converting to dataframe..."
df = pd.DataFrame({'fens': fens, 'move_num': move_num, 'actual': actual, 'predicted': predicted})
df['abs_error'] = np.abs(df['actual'] - df['predicted'])
# Group
if verbose:
print "Grouping..."
g = df.groupby('move_num')
mean_data = g.aggregate(np.mean)
var_data = g.aggregate(np.var)
if metric == 'err_mean':
x = mean_data['abs_error']
elif metric == 'err_variance':
x = var_data['abs_error']
elif metric == 'mean':
x = mean_data['predicted']
elif metric == 'variance':
x = var_data['predicted']
else:
raise ValueError("Metric %s has not been implemented!" % metric)
plt.plot(x, label=labels[w], color=plt.cm.cool(w * 1.0 / len(weight_files)))
if metric == 'mean':
plt.plot(mean_data['actual'], label='actual', color='k')
elif metric == 'variance':
plt.plot(var_data['actual'], label='actual', color='k')
plt.xlabel('Half-Move')
plt.ylabel('%s' % metric)
plt.xlim([0, 100])
# plt.ylim([0, int(7e6)])
plt.title('%s by Move' % metric)
plt.legend()
plt.show()
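# Hypothetical example call (the weight file names and labels are placeholders;
# compare the commented-out invocations in main() below):
#
#     metric_by_move(['weights_a.p', 'weights_b.p'], labels=['run A', 'run B'],
#                    num_values=1000, metric='err_mean', verbose=True)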
def prediction_distribution(weight_files, labels=None, bins=None, num_values=500, verbose=False):
actual = sf.load_stockfish_values(num_values=num_values)
fens = cgp.load_fens(num_values=num_values)
labels = weight_files if labels is None else labels # default the labels to the weight file names
# Predict values
for w, weight_file in enumerate(weight_files):
if verbose:
print "Generating predictions for %s..." % weight_file
with Guerilla('Harambe', load_file=weight_file) as g:
predicted = get_predictions(g, fens, verbose)
plt.hist(predicted, bins=bins, linewidth=1.5, alpha=1.0, label=labels[w], histtype='step',
color=plt.cm.cool(w * 1.0 / len(weight_files)))
plt.title(weight_file)
# plt.hist()
plt.hist(actual, bins=bins, linewidth=1.5, label='SF', histtype='step')
plt.legend()
plt.title('Score Histogram')
plt.show()
def error_by_depth(weight_file, min_depth=1, max_depth=3, num_values=1000):
actual = sf.load_stockfish_values(num_values=num_values)
fens = cgp.load_fens(num_values=num_values)
# Parameters
binwidth = 25
with Guerilla('Harambe', load_file=weight_file, search_type='minimax') as g:
for depth in range(min_depth, max_depth + 1):
# Set depth
g.search.max_depth = depth
g.search.reset()
predicted = get_predictions(g, fens, mode='search', verbose=True) # Get predictions
error = abs(np.array(actual) - np.array(predicted))
plt.subplot((max_depth - min_depth + 1), 1, depth)
# Make sum = 1
weights = np.ones_like(error) / float(len(error))
plt.hist(error, weights=weights, bins=range(0, 5000 + binwidth, binwidth))
# Properties
plt.ylim([0, 1.0])
plt.title('Depth %s' % depth)
plt.axvline(x=np.mean(error), color='k')
print "Depth %d MEAN: %f STD: %f VAR: %f" % (depth, np.mean(error), np.std(error), np.var(error))
err_greater = [x for x in error if x > 500]
err_lesser = [x for x in error if x <= 500]
print "Depth %d: %d predictions with an error > 500" % (depth, len(err_greater))
print "Depth %d: Mean of errors > 500 is %f" % (depth, np.mean(err_greater))
print "Depth %d: Mean of errors < 500 is %f" % (depth, np.mean(err_lesser))
plt.ylabel('Frequency')
plt.xlabel('Abs Error')
plt.show()
def distr_by_depth(weight_file, fen, min_depth=1, max_depth=3):
actual = sf.stockfish_eval_fn(fen)
print "Actual value %f" % actual
# Parameters
binwidth = 25
root_score = {}
scores_by_depth = {depth: [] for depth in range(min_depth, max_depth + 1)}
with Guerilla('Harambe', load_file=weight_file, search_type='iterativedeepening') as g:
for depth in range(min_depth, max_depth + 1):
# Search
print "Running depth %d" % depth
g.search.max_depth = depth
g.search.ab_prune = False # turn off pruning
board = chess.Board(fen)
score, move, _ = g.search.run(board)
print "%f %s" % (score, move)
root_score[depth] = score
# Travel through depths
queue = [g.search.root]
while queue != []:
curr = queue.pop(0) # pop front of queue
# Push children to queue
for child in curr.children.itervalues():
queue.append(child)
# only store leaf boards
if curr.depth == depth:
scores_by_depth[curr.depth].append(curr.value)
# Plot
for depth, values in scores_by_depth.iteritems():
plt.subplot((max_depth - min_depth + 1), 1, depth)
# Make sum = 1
weights = np.ones_like(values) / float(len(values))
plt.hist(values, weights=weights, bins=range(-5000, 5000 + binwidth, binwidth))
# Properties
plt.ylim([0, 1.0])
plt.title('Depth %s' % depth)
plt.axvline(x=np.mean(actual), color='k')
plt.axvline(x=root_score[depth], color='r')
plt.ylabel('Frequency')
plt.xlabel('Value')
plt.show()
def get_predictions(guerilla, fens, mode=None, verbose=False):
mode = 'eval' if mode is None else mode
if verbose:
print "Generating predictions for %s..." % guerilla.name
predictions = [0] * len(fens)
for i, fen in enumerate(fens):
if mode == 'eval':
predictions[i] = guerilla.get_cp_adv_white(fen)
elif mode == 'search':
board = chess.Board(fen)
score, _, _ = guerilla.search.run(board)
predictions[i] = score
else:
raise ValueError("Prediction mode %s has not been implemented!" % mode)
if verbose:
print_perc = 5
if (i % (len(fens) / (100.0 / print_perc)) - 100.0 / len(fens)) < 0:
print "%d%% " % (i / (len(fens) / 100.0)),
print ''
return predictions
def main():
# labels=[str(i) for i in range(25, 125, 25)]
# weight_files = ['var_check_2_%s.p' % label for label in labels]
# weight_files = ['var_check_old/var_check_2_250.p', 'var_check_old/var_check_2_100.p', ]
# labels = ['TD', 'TD + loss']
# metric_by_move(weight_files, labels, num_values=5000, verbose=True, metric='variance')
# bins = range(-5000, 5100, 100)
#
# # Add original
# weight_files = ['6811.p'] + weight_files
# labels = ['no_TD'] + labels
#
# prediction_distribution(weight_files, labels=labels, bins=bins, num_values=10000, verbose=True)
# error_by_depth('6811.p', num_values=2000)
distr_by_depth('6811.p', fen='1r3rk1/8/3p3p/p1qP2p1/R1b1P3/2Np1P2/1P1Q1RP1/6K1 w - - 0 1')
if __name__ == '__main__':
main()
|
|
import theano
from .. import init
from .. import nonlinearities
from .base import Layer
from .conv import conv_output_length
from ..utils import as_tuple
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from theano.sandbox.cuda.blas import GpuCorrMM
__all__ = [
"MMLayer",
"Conv2DMMLayer",
]
if not theano.config.device.startswith("gpu"):
raise ImportError("requires a GPU to work") # pragma: no cover
# base class for all layers that rely on GpuCorrMM directly
class MMLayer(Layer):
pass
class Conv2DMMLayer(MMLayer):
"""
lasagne.layers.Conv2DMMLayer(incoming, num_filters, filter_size,
stride=(1, 1), pad=0, untie_biases=False,
W=lasagne.init.GlorotUniform(), b=lasagne.init.Constant(0.),
nonlinearity=lasagne.nonlinearities.rectify, flip_filters=False,
**kwargs)
2D convolutional layer
Performs a 2D convolution on its input and optionally adds a bias and
applies an elementwise nonlinearity. This is an alternative implementation
which uses ``theano.sandbox.cuda.blas.GpuCorrMM`` directly.
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape. The
output of this layer should be a 4D tensor, with shape
``(batch_size, num_input_channels, input_rows, input_columns)``.
num_filters : int
The number of learnable convolutional filters this layer has.
filter_size : int or iterable of int
An integer or a 2-element tuple specifying the size of the filters.
stride : int or iterable of int
An integer or a 2-element tuple specifying the stride of the
convolution operation.
pad : int, iterable of int, 'full', 'same' or 'valid' (default: 0)
By default, the convolution is only computed where the input and the
filter fully overlap (a valid convolution). When ``stride=1``, this
yields an output that is smaller than the input by ``filter_size - 1``.
The `pad` argument allows you to implicitly pad the input with zeros,
extending the output size.
A single integer results in symmetric zero-padding of the given size on
all borders, a tuple of two integers allows different symmetric padding
per dimension.
``'full'`` pads with one less than the filter size on both sides. This
is equivalent to computing the convolution wherever the input and the
filter overlap by at least one position.
``'same'`` pads with half the filter size (rounded down) on both sides.
When ``stride=1`` this results in an output size equal to the input
size. Even filter size is not supported.
``'valid'`` is an alias for ``0`` (no padding / a valid convolution).
Note that ``'full'`` and ``'same'`` can be faster than equivalent
integer values due to optimizations by Theano.
untie_biases : bool (default: False)
If ``False``, the layer will have a bias parameter for each channel,
which is shared across all positions in this channel. As a result, the
`b` attribute will be a vector (1D).
If True, the layer will have separate bias parameters for each
position in each channel. As a result, the `b` attribute will be a
3D tensor.
W : Theano shared variable, expression, numpy array or callable
Initial value, expression or initializer for the weights.
These should be a 4D tensor with shape
``(num_filters, num_input_channels, filter_rows, filter_columns)``.
See :func:`lasagne.utils.create_param` for more information.
b : Theano shared variable, expression, numpy array, callable or ``None``
Initial value, expression or initializer for the biases. If set to
``None``, the layer will have no biases. Otherwise, biases should be
a 1D array with shape ``(num_filters,)`` if `untie_biases` is set to
``False``. If it is set to ``True``, its shape should be
``(num_filters, output_rows, output_columns)`` instead.
See :func:`lasagne.utils.create_param` for more information.
nonlinearity : callable or None
The nonlinearity that is applied to the layer activations. If None
is provided, the layer will be linear.
flip_filters : bool (default: False)
Whether to flip the filters and perform a convolution, or not to flip
them and perform a correlation. Flipping adds a bit of overhead, so it
is disabled by default. In most cases this does not make a difference
anyway because the filters are learnt. However, ``flip_filters`` should
be set to ``True`` if weights are loaded into it that were learnt using
a regular :class:`lasagne.layers.Conv2DLayer`, for example.
**kwargs
Any additional keyword arguments are passed to the `Layer` superclass.
Attributes
----------
W : Theano shared variable
Variable representing the filter weights.
b : Theano shared variable
Variable representing the biases.
Notes
-----
Unlike :class:`lasagne.layers.Conv2DLayer`, this layer properly supports
``pad='same'``. It is not emulated. This should result in better
performance.
"""
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
pad=0, untie_biases=False, W=init.GlorotUniform(),
b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
flip_filters=False, **kwargs):
super(Conv2DMMLayer, self).__init__(incoming, **kwargs)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
self.num_filters = num_filters
self.filter_size = as_tuple(filter_size, 2)
self.stride = as_tuple(stride, 2)
self.untie_biases = untie_biases
self.flip_filters = flip_filters
if pad == 'valid':
self.pad = (0, 0)
elif pad == 'full':
self.pad = (self.filter_size[0] - 1, self.filter_size[1] - 1)
elif pad == 'same':
if any(s % 2 == 0 for s in self.filter_size):
raise NotImplementedError(
'`same` padding requires odd filter size.')
self.pad = (self.filter_size[0] // 2, self.filter_size[1] // 2)
else:
self.pad = as_tuple(pad, 2, int)
self.W = self.add_param(W, self.get_W_shape(), name="W")
if b is None:
self.b = None
else:
if self.untie_biases:
biases_shape = (num_filters, self.output_shape[2],
self.output_shape[3])
else:
biases_shape = (num_filters,)
self.b = self.add_param(b, biases_shape, name="b",
regularizable=False)
self.corr_mm_op = GpuCorrMM(subsample=self.stride,
border_mode=self.pad)
def get_W_shape(self):
num_input_channels = self.input_shape[1]
return (self.num_filters, num_input_channels, self.filter_size[0],
self.filter_size[1])
def get_output_shape_for(self, input_shape):
batch_size = input_shape[0]
pad = self.pad if isinstance(self.pad, tuple) else (self.pad,) * 2
output_rows = conv_output_length(input_shape[2],
self.filter_size[0],
self.stride[0],
pad[0])
output_columns = conv_output_length(input_shape[3],
self.filter_size[1],
self.stride[1],
pad[1])
return (batch_size, self.num_filters, output_rows, output_columns)
def get_output_for(self, input, **kwargs):
filters = self.W
if self.flip_filters:
filters = filters[:, :, ::-1, ::-1] # flip top-down, left-right
contiguous_filters = gpu_contiguous(filters)
contiguous_input = gpu_contiguous(input)
conved = self.corr_mm_op(contiguous_input, contiguous_filters)
if self.b is None:
activation = conved
elif self.untie_biases:
activation = conved + self.b.dimshuffle('x', 0, 1, 2)
else:
activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(activation)
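# A minimal usage sketch for Conv2DMMLayer (requires a CUDA-enabled Theano
# setup, since this module raises ImportError otherwise). The input shape and
# filter count are illustrative only:
#
#     from lasagne.layers import InputLayer
#     l_in = InputLayer((None, 3, 32, 32))
#     l_conv = Conv2DMMLayer(l_in, num_filters=16, filter_size=(3, 3), pad='same')
#     l_conv.output_shape  # -> (None, 16, 32, 32) with the default stride (1, 1)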
|
|
# -*- coding: utf-8 -*-
"""
hyper/http11/connection
~~~~~~~~~~~~~~~~~~~~~~~
Objects that build hyper's connection-level HTTP/1.1 abstraction.
"""
import logging
import os
import socket
import base64
from collections import Iterable, Mapping
from .response import HTTP11Response
from ..tls import wrap_socket, H2C_PROTOCOL
from ..common.bufsocket import BufferedSocket
from ..common.exceptions import TLSUpgrade, HTTPUpgrade
from ..common.headers import HTTPHeaderMap
from ..common.util import to_bytestring, to_host_port_tuple
from ..compat import bytes
from ..packages.hyperframe.frame import SettingsFrame
# We prefer pycohttpparser to the pure-Python interpretation
try: # pragma: no cover
from pycohttpparser.api import Parser
except ImportError: # pragma: no cover
from .parser import Parser
log = logging.getLogger(__name__)
BODY_CHUNKED = 1
BODY_FLAT = 2
class HTTP11Connection(object):
"""
An object representing a single HTTP/1.1 connection to a server.
:param host: The host to connect to. This may be an IP address or a
hostname, and optionally may include a port: for example,
``'twitter.com'``, ``'twitter.com:443'`` or ``'127.0.0.1'``.
:param port: (optional) The port to connect to. If not provided and one also
isn't provided in the ``host`` parameter, defaults to 80.
:param secure: (optional) Whether the request should use TLS. Defaults to
``False`` for most requests, but to ``True`` for any request issued to
port 443.
:param ssl_context: (optional) A class with custom certificate settings.
If not provided then hyper's default ``SSLContext`` is used instead.
:param proxy_host: (optional) The proxy to connect to. This can be an IP
address or a host name and may include a port.
:param proxy_port: (optional) The proxy port to connect to. If not provided
and one also isn't provided in the ``proxy_host`` parameter,
defaults to 8080.
"""
def __init__(self, host, port=None, secure=None, ssl_context=None,
proxy_host=None, proxy_port=None, **kwargs):
if port is None:
self.host, self.port = to_host_port_tuple(host, default_port=80)
else:
self.host, self.port = host, port
# Record whether we plan to secure the request. In future this should
# be extended to a security profile, but a bool will do for now.
# TODO: Actually do something with this!
if secure is not None:
self.secure = secure
elif self.port == 443:
self.secure = True
else:
self.secure = False
# only send http upgrade headers for non-secure connection
self._send_http_upgrade = not self.secure
self.ssl_context = ssl_context
self._sock = None
# Setup proxy details if applicable.
if proxy_host:
if proxy_port is None:
self.proxy_host, self.proxy_port = to_host_port_tuple(proxy_host, default_port=8080)
else:
self.proxy_host, self.proxy_port = proxy_host, proxy_port
else:
self.proxy_host = None
self.proxy_port = None
#: The size of the in-memory buffer used to store data from the
#: network. This is used as a performance optimisation. Increase buffer
#: size to improve performance: decrease it to conserve memory.
#: Defaults to 64kB.
self.network_buffer_size = 65536
#: The object used to perform HTTP/1.1 parsing. Needs to conform to
#: the standard hyper parsing interface.
self.parser = Parser()
def connect(self):
"""
Connect to the server specified when the object was created. This is a
no-op if we're already connected.
:returns: Nothing.
"""
if self._sock is None:
if not self.proxy_host:
host = self.host
port = self.port
else:
host = self.proxy_host
port = self.proxy_port
sock = socket.create_connection((host, port), 5)
proto = None
if self.secure:
assert not self.proxy_host, "Using a proxy with HTTPS not yet supported."
sock, proto = wrap_socket(sock, host, self.ssl_context)
log.debug("Selected protocol: %s", proto)
sock = BufferedSocket(sock, self.network_buffer_size)
if proto not in ('http/1.1', None):
raise TLSUpgrade(proto, sock)
self._sock = sock
return
def request(self, method, url, body=None, headers={}):
"""
This will send a request to the server using the HTTP request method
``method`` and the selector ``url``. If the ``body`` argument is
present, it should be a string or bytes object of data to send after the
headers are finished. Strings are encoded as UTF-8. To use other
encodings, pass a bytes object. The Content-Length header is set to the
length of the body field.
:param method: The request method, e.g. ``'GET'``.
:param url: The URL to contact, e.g. ``'/path/segment'``.
:param body: (optional) The request body to send. Must be a bytestring
or a file-like object.
:param headers: (optional) The headers to send on the request.
:returns: Nothing.
"""
method = to_bytestring(method)
url = to_bytestring(url)
if not isinstance(headers, HTTPHeaderMap):
if isinstance(headers, Mapping):
headers = HTTPHeaderMap(headers.items())
elif isinstance(headers, Iterable):
headers = HTTPHeaderMap(headers)
else:
raise ValueError('Header argument must be a dictionary or an iterable')
if self._sock is None:
self.connect()
if self._send_http_upgrade:
self._add_upgrade_headers(headers)
self._send_http_upgrade = False
# We may need extra headers.
if body:
body_type = self._add_body_headers(headers, body)
if b'host' not in headers:
headers[b'host'] = self.host
# Begin by emitting the header block.
self._send_headers(method, url, headers)
# Next, send the request body.
if body:
self._send_body(body, body_type)
return
def get_response(self):
"""
Returns a response object.
This is an early beta, so the response object is pretty stupid. That's
ok, we'll fix it later.
"""
headers = HTTPHeaderMap()
response = None
while response is None:
# 'encourage' the socket to receive data.
self._sock.fill()
response = self.parser.parse_response(self._sock.buffer)
for n, v in response.headers:
headers[n.tobytes()] = v.tobytes()
self._sock.advance_buffer(response.consumed)
if (response.status == 101 and
b'upgrade' in headers['connection'] and
H2C_PROTOCOL.encode('utf-8') in headers['upgrade']):
raise HTTPUpgrade(H2C_PROTOCOL, self._sock)
return HTTP11Response(
response.status,
response.msg.tobytes(),
headers,
self._sock,
self
)
def _send_headers(self, method, url, headers):
"""
Handles the logic of sending the header block.
"""
self._sock.send(b' '.join([method, url, b'HTTP/1.1\r\n']))
for name, value in headers.iter_raw():
name, value = to_bytestring(name), to_bytestring(value)
header = b''.join([name, b': ', value, b'\r\n'])
self._sock.send(header)
self._sock.send(b'\r\n')
def _add_body_headers(self, headers, body):
"""
Adds any headers needed for sending the request body. This will always
defer to the user-supplied header content.
:returns: One of (BODY_CHUNKED, BODY_FLAT), indicating what type of
request body should be used.
"""
if b'content-length' in headers:
return BODY_FLAT
if b'chunked' in headers.get(b'transfer-encoding', []):
return BODY_CHUNKED
# For bytestring bodies we upload the content with a fixed length.
# For file objects, we use the length of the file object.
if isinstance(body, bytes):
length = str(len(body)).encode('utf-8')
elif hasattr(body, 'fileno'):
length = str(os.fstat(body.fileno()).st_size).encode('utf-8')
else:
length = None
if length:
headers[b'content-length'] = length
return BODY_FLAT
headers[b'transfer-encoding'] = b'chunked'
return BODY_CHUNKED
def _add_upgrade_headers(self, headers):
# Add HTTP Upgrade headers.
headers[b'connection'] = b'Upgrade, HTTP2-Settings'
headers[b'upgrade'] = H2C_PROTOCOL
# Encode SETTINGS frame payload in Base64 and put into the HTTP-2 Settings header.
http2_settings = SettingsFrame(0)
http2_settings.settings[SettingsFrame.INITIAL_WINDOW_SIZE] = 65535
headers[b'HTTP2-Settings'] = base64.b64encode(http2_settings.serialize_body())
def _send_body(self, body, body_type):
"""
Handles the HTTP/1.1 logic for sending HTTP bodies. This does magical
different things in different cases.
"""
if body_type == BODY_FLAT:
# Special case for files and other 'readable' objects.
if hasattr(body, 'read'):
while True:
block = body.read(16*1024)
if not block:
break
try:
self._sock.send(block)
except TypeError:
raise ValueError(
"File objects must return bytestrings"
)
return
# Case for bytestrings.
elif isinstance(body, bytes):
self._sock.send(body)
return
# Iterables that set a specific content length.
else:
for item in body:
try:
self._sock.send(item)
except TypeError:
raise ValueError("Body must be a bytestring")
return
# Chunked! For chunked bodies we don't special-case, we just iterate
# over what we have and send stuff out.
for chunk in body:
length = '{0:x}'.format(len(chunk)).encode('ascii')
# For now write this as four 'send' calls. That's probably
# inefficient, let's come back to it.
try:
self._sock.send(length)
self._sock.send(b'\r\n')
self._sock.send(chunk)
self._sock.send(b'\r\n')
except TypeError:
raise ValueError(
"Iterable bodies must always iterate in bytestrings"
)
self._sock.send(b'0\r\n\r\n')
return
def close(self):
"""
Closes the connection. This closes the socket and then abandons the
reference to it. After calling this method, any outstanding
:class:`Response <hyper.http11.response.Response>` objects will throw
exceptions if attempts are made to read their bodies.
In some cases this method will automatically be called.
.. warning:: This method should absolutely only be called when you are
certain the connection object is no longer needed.
"""
self._sock.close()
self._sock = None
# The following two methods are the implementation of the context manager
# protocol.
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
return False # Never swallow exceptions.
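# A minimal usage sketch for HTTP11Connection. The host below is illustrative
# and the helper is never invoked; it only demonstrates the connect/request/
# get_response flow implemented above.
def _example_request():  # pragma: no cover
    with HTTP11Connection('example.com', 80) as connection:
        connection.request('GET', '/', headers={'accept': '*/*'})
        response = connection.get_response()
        return response.status, response.read()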
|
|
from __future__ import unicode_literals
import logging
import urlparse
from mopidy.audio import PlaybackState
from mopidy.core import listener
logger = logging.getLogger(__name__)
# TODO: split mixing out from playback?
class PlaybackController(object):
pykka_traversable = True
def __init__(self, mixer, backends, core):
self.mixer = mixer
self.backends = backends
self.core = core
self._state = PlaybackState.STOPPED
self._volume = None
self._mute = False
def _get_backend(self):
# TODO: take in track instead
if self.current_tl_track is None:
return None
uri = self.current_tl_track.track.uri
uri_scheme = urlparse.urlparse(uri).scheme
return self.backends.with_playback.get(uri_scheme, None)
# Properties
def get_current_tl_track(self):
return self.current_tl_track
current_tl_track = None
"""
The currently playing or selected :class:`mopidy.models.TlTrack`, or
:class:`None`.
"""
def get_current_track(self):
return self.current_tl_track and self.current_tl_track.track
current_track = property(get_current_track)
"""
The currently playing or selected :class:`mopidy.models.Track`.
Read-only. Extracted from :attr:`current_tl_track` for convenience.
"""
def get_state(self):
return self._state
def set_state(self, new_state):
(old_state, self._state) = (self.state, new_state)
logger.debug('Changing state: %s -> %s', old_state, new_state)
self._trigger_playback_state_changed(old_state, new_state)
state = property(get_state, set_state)
"""
The playback state. Must be :attr:`PLAYING`, :attr:`PAUSED`, or
:attr:`STOPPED`.
Possible states and transitions:
.. digraph:: state_transitions
"STOPPED" -> "PLAYING" [ label="play" ]
"STOPPED" -> "PAUSED" [ label="pause" ]
"PLAYING" -> "STOPPED" [ label="stop" ]
"PLAYING" -> "PAUSED" [ label="pause" ]
"PLAYING" -> "PLAYING" [ label="play" ]
"PAUSED" -> "PLAYING" [ label="resume" ]
"PAUSED" -> "STOPPED" [ label="stop" ]
"""
def get_time_position(self):
backend = self._get_backend()
if backend:
return backend.playback.get_time_position().get()
else:
return 0
time_position = property(get_time_position)
"""Time position in milliseconds."""
def get_volume(self):
if self.mixer:
return self.mixer.get_volume().get()
else:
# For testing
return self._volume
def set_volume(self, volume):
if self.mixer:
self.mixer.set_volume(volume)
else:
# For testing
self._volume = volume
volume = property(get_volume, set_volume)
"""Volume as int in range [0..100] or :class:`None` if unknown. The volume
scale is linear.
"""
def get_mute(self):
if self.mixer:
return self.mixer.get_mute().get()
else:
# For testing
return self._mute
def set_mute(self, value):
value = bool(value)
if self.mixer:
self.mixer.set_mute(value)
else:
# For testing
self._mute = value
mute = property(get_mute, set_mute)
"""Mute state as a :class:`True` if muted, :class:`False` otherwise"""
# Methods
# TODO: remove this.
def change_track(self, tl_track, on_error_step=1):
"""
Change to the given track, keeping the current playback state.
:param tl_track: track to change to
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:param on_error_step: direction to step at play error, 1 for next
track (default), -1 for previous track
:type on_error_step: int, -1 or 1
"""
old_state = self.state
self.stop()
self.current_tl_track = tl_track
if old_state == PlaybackState.PLAYING:
self.play(on_error_step=on_error_step)
elif old_state == PlaybackState.PAUSED:
self.pause()
# TODO: this is not really end of track, this is on_need_next_track
def on_end_of_track(self):
"""
Tell the playback controller that end of track is reached.
Used by event handler in :class:`mopidy.core.Core`.
"""
if self.state == PlaybackState.STOPPED:
return
original_tl_track = self.current_tl_track
next_tl_track = self.core.tracklist.eot_track(original_tl_track)
if next_tl_track:
self.change_track(next_tl_track)
else:
self.stop()
self.current_tl_track = None
self.core.tracklist.mark_played(original_tl_track)
def on_tracklist_change(self):
"""
Tell the playback controller that the current playlist has changed.
Used by :class:`mopidy.core.TracklistController`.
"""
if self.current_tl_track not in self.core.tracklist.tl_tracks:
self.stop()
self.current_tl_track = None
def next(self):
"""
Change to the next track.
The current playback state will be kept. If it was playing, playing
will continue. If it was paused, it will still be paused, etc.
"""
tl_track = self.core.tracklist.next_track(self.current_tl_track)
if tl_track:
# TODO: switch to:
# backend.play(track)
# wait for state change?
self.change_track(tl_track)
else:
self.stop()
self.current_tl_track = None
def pause(self):
"""Pause playback."""
backend = self._get_backend()
if not backend or backend.playback.pause().get():
# TODO: switch to:
# backend.track(pause)
# wait for state change?
self.state = PlaybackState.PAUSED
self._trigger_track_playback_paused()
def play(self, tl_track=None, on_error_step=1):
"""
Play the given track, or if the given track is :class:`None`, play the
currently active track.
:param tl_track: track to play
:type tl_track: :class:`mopidy.models.TlTrack` or :class:`None`
:param on_error_step: direction to step at play error, 1 for next
track (default), -1 for previous track
:type on_error_step: int, -1 or 1
"""
assert on_error_step in (-1, 1)
if tl_track is None:
if self.state == PlaybackState.PAUSED:
return self.resume()
if self.current_tl_track is not None:
tl_track = self.current_tl_track
else:
if on_error_step == 1:
tl_track = self.core.tracklist.next_track(tl_track)
elif on_error_step == -1:
tl_track = self.core.tracklist.previous_track(tl_track)
if tl_track is None:
return
assert tl_track in self.core.tracklist.tl_tracks
# TODO: switch to:
# backend.play(track)
# wait for state change?
if self.state == PlaybackState.PLAYING:
self.stop()
self.current_tl_track = tl_track
self.state = PlaybackState.PLAYING
backend = self._get_backend()
success = backend and backend.playback.play(tl_track.track).get()
if success:
self.core.tracklist.mark_playing(tl_track)
self.core.history.add(tl_track.track)
# TODO: replace with stream-changed
self._trigger_track_playback_started()
else:
self.core.tracklist.mark_unplayable(tl_track)
if on_error_step == 1:
# TODO: can cause an endless loop for single track repeat.
self.next()
elif on_error_step == -1:
self.previous()
def previous(self):
"""
Change to the previous track.
The current playback state will be kept. If it was playing, playing
will continue. If it was paused, it will still be paused, etc.
"""
tl_track = self.current_tl_track
# TODO: switch to:
# self.play(....)
# wait for state change?
self.change_track(
self.core.tracklist.previous_track(tl_track), on_error_step=-1)
def resume(self):
"""If paused, resume playing the current track."""
if self.state != PlaybackState.PAUSED:
return
backend = self._get_backend()
if backend and backend.playback.resume().get():
self.state = PlaybackState.PLAYING
# TODO: trigger via gst messages
self._trigger_track_playback_resumed()
# TODO: switch to:
# backend.resume()
# wait for state change?
def seek(self, time_position):
"""
Seeks to time position given in milliseconds.
:param time_position: time position in milliseconds
:type time_position: int
:rtype: :class:`True` if successful, else :class:`False`
"""
if not self.core.tracklist.tracks:
return False
if self.state == PlaybackState.STOPPED:
self.play()
elif self.state == PlaybackState.PAUSED:
self.resume()
if time_position < 0:
time_position = 0
elif time_position > self.current_track.length:
self.next()
return True
backend = self._get_backend()
if not backend:
return False
success = backend.playback.seek(time_position).get()
if success:
self._trigger_seeked(time_position)
return success
def stop(self):
"""Stop playing."""
if self.state != PlaybackState.STOPPED:
backend = self._get_backend()
time_position_before_stop = self.time_position
if not backend or backend.playback.stop().get():
self.state = PlaybackState.STOPPED
self._trigger_track_playback_ended(time_position_before_stop)
def _trigger_track_playback_paused(self):
logger.debug('Triggering track playback paused event')
if self.current_track is None:
return
listener.CoreListener.send(
'track_playback_paused',
tl_track=self.current_tl_track, time_position=self.time_position)
def _trigger_track_playback_resumed(self):
logger.debug('Triggering track playback resumed event')
if self.current_track is None:
return
listener.CoreListener.send(
'track_playback_resumed',
tl_track=self.current_tl_track, time_position=self.time_position)
def _trigger_track_playback_started(self):
logger.debug('Triggering track playback started event')
if self.current_tl_track is None:
return
listener.CoreListener.send(
'track_playback_started',
tl_track=self.current_tl_track)
def _trigger_track_playback_ended(self, time_position_before_stop):
logger.debug('Triggering track playback ended event')
if self.current_tl_track is None:
return
listener.CoreListener.send(
'track_playback_ended',
tl_track=self.current_tl_track,
time_position=time_position_before_stop)
def _trigger_playback_state_changed(self, old_state, new_state):
logger.debug('Triggering playback state change event')
listener.CoreListener.send(
'playback_state_changed',
old_state=old_state, new_state=new_state)
def _trigger_seeked(self, time_position):
logger.debug('Triggering seeked event')
listener.CoreListener.send('seeked', time_position=time_position)
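# Hypothetical sequence of calls against a fully wired core (assumed to be a
# mopidy.core.Core actor proxy with at least one playable track in the
# tracklist); it walks the transitions documented on the `state` property:
#
#     core.playback.play()       # STOPPED -> PLAYING
#     core.playback.pause()      # PLAYING -> PAUSED
#     core.playback.resume()     # PAUSED  -> PLAYING
#     core.playback.stop()       # PLAYING -> STOPPED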
|
|
# =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from sqlalchemy import Column, Table, Boolean
from sqlalchemy import String, Integer, DateTime, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext import mutable
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship, synonym
from utils import is_empty
from salts import encode, decode
from database import engine
from database import DBSession
from database import STRING1, STRING2
from database import CURRENT_SCHEMA_VERSION
from constants import UNKNOWN
from constants import JobStatus
from constants import UserPrivilege
from constants import ProxyAgent
import datetime
import logging
import traceback
from werkzeug import check_password_hash
from werkzeug import generate_password_hash
from ldap_utils import ldap_auth
from csm_exceptions import CSMLDAPException
from sqlalchemy.types import TypeDecorator, VARCHAR
import json
from itsdangerous import (TimedJSONWebSignatureSerializer
as Serializer, BadSignature, SignatureExpired)
from flask.ext.httpauth import HTTPBasicAuth
# Contains information for password encryption
encrypt_dict = None
class JSONEncodedDict(TypeDecorator):
impl = Text
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
mutable.MutableDict.associate_with(JSONEncodedDict)
Base = declarative_base()
class User(Base):
"""A user login, with credentials and authentication."""
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
username = Column(String(50), nullable=False, index=True)
# encrypted password is much longer in length
_password = Column('password', String(100), nullable=False)
privilege = Column(String(20), nullable=False)
fullname = Column(String(100), nullable=False)
email = Column(String(200), nullable=False)
active = Column(Boolean, default=True)
# host password is used when CSM Server user credential is used for host login.
_host_password = Column('host_password', String(100))
# Note the lack of parentheses after datetime.utcnow. This is intentional
# so SQLAlchemy can make a run-time call during row insertion.
created_time = Column(DateTime, default=datetime.datetime.utcnow)
modified_time = Column(DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow)
preferences = relationship("Preferences",
order_by="Preferences.id",
backref="user",
cascade="all, delete, delete-orphan")
install_job = relationship("InstallJob",
order_by="InstallJob.id",
backref="user",
cascade="all, delete, delete-orphan")
download_job = relationship("DownloadJob",
order_by="DownloadJob.id",
backref="user",
cascade="all, delete, delete-orphan")
download_job_history = relationship("DownloadJobHistory",
order_by="desc(DownloadJobHistory.created_time)",
backref="host",
cascade="all, delete, delete-orphan")
csm_message = relationship("CSMMessage",
cascade="all, delete, delete-orphan")
conformance_report = relationship("ConformanceReport",
cascade="all, delete, delete-orphan")
def _get_password(self):
return self._password
def _set_password(self, password):
if password:
password = password.strip()
self.host_password = password
self._password = generate_password_hash(password)
@property
def host_password(self):
global encrypt_dict
return decode(encrypt_dict, self._host_password)
@host_password.setter
def host_password(self, value):
global encrypt_dict
self._host_password = encode(encrypt_dict, value)
password_descriptor = property(_get_password, _set_password)
password = synonym('_password', descriptor=password_descriptor)
def check_password(self, password):
if self.password is None:
return False
password = password.strip()
if not password:
return False
return check_password_hash(self.password, password)
@classmethod
def authenticate(cls, query, username, password):
username = username.strip().lower()
db_session = DBSession()
# Authenticate with LDAP Server first
system_option = SystemOption.get(db_session)
ldap_authenticated = False
try:
if system_option.enable_ldap_auth and not is_empty(username) and not is_empty(password):
ldap_authenticated = ldap_auth(system_option, username, password)
except CSMLDAPException:
# logger.exception("authenticate hit exception")
pass
user = query(cls).filter(cls.username == username).first()
if ldap_authenticated:
if user is None:
# Create a LDAP user with Network Administrator privilege
user = create_user(db_session, username, password, UserPrivilege.NETWORK_ADMIN, username, username)
return user, True
else:
# Update the password
if not is_empty(password):
user.password = password
db_session.commit()
if user is None:
return None, False
if not user.active:
return user, False
authenticated = user.check_password(password)
# This is for backward compatibility. Existing users before the feature "Use CSM Server User Credential"
# will need to have their password encrypted for device installation authentication.
if authenticated and is_empty(user.host_password):
user.host_password = password
db_session.commit()
return user, user.check_password(password)
@staticmethod
def verify_auth_token(token):
s = Serializer('CSMSERVER')
db_session = DBSession()
try:
data = s.loads(token)
except SignatureExpired:
return None # valid token, but expired
except BadSignature:
return None # invalid token
user = db_session.query(User).filter_by(id = data['id']).first()
return user
def generate_auth_token(self, expiration=600):
s = Serializer('CSMSERVER', expires_in=expiration)
return s.dumps({'id': self.id})
# Hooks for Flask-Login.
#
# As methods, these are only valid for User instances, so the
# authentication will have already happened in the view functions.
#
# If you prefer, you can use Flask-Login's UserMixin to get these methods.
def get_id(self):
return str(self.id)
def is_active(self):
return True
def is_anonymous(self):
return False
def is_authenticated(self):
return True
def __repr__(self):
return u'<{self.__class__.__name__}: {self.id}>'.format(self=self)
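# Hypothetical token round-trip using the methods above ('CSMSERVER' is the
# secret already hard-coded in generate_auth_token/verify_auth_token; the
# username filter is illustrative):
#
#     user = DBSession().query(User).filter_by(username='root').first()
#     token = user.generate_auth_token(expiration=600)
#     assert User.verify_auth_token(token).id == user.id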
class Host(Base):
__tablename__ = 'host'
id = Column(Integer, primary_key=True)
hostname = Column(String(50), nullable=False, index=True)
family = Column(String(20), default=UNKNOWN)
platform = Column(String(20), default=UNKNOWN)
software_platform = Column(String(20), default=UNKNOWN)
software_version = Column(String(20))
os_type = Column(String(20))
roles = Column(String(100))
region_id = Column(Integer, ForeignKey('region.id'))
proxy_agent = Column(String(30), default=ProxyAgent.CSM_SERVER)
can_schedule = Column(Boolean, default=True)
can_install = Column(Boolean, default=True)
created_time = Column(DateTime, default=datetime.datetime.utcnow)
created_by = Column(String(50))
region = relationship('Region', foreign_keys='Host.region_id')
context = relationship("HostContext",
cascade="all, delete, delete-orphan")
connection_param = relationship("ConnectionParam",
order_by="ConnectionParam.id",
backref="host",
cascade="all, delete, delete-orphan")
inventory_job = relationship("InventoryJob",
cascade="all, delete, delete-orphan")
inventory_job_history = relationship("InventoryJobHistory",
order_by="desc(InventoryJobHistory.created_time)",
backref="host",
cascade="all, delete, delete-orphan")
packages = relationship("Package",
order_by="Package.id",
backref="host",
cascade="all, delete, delete-orphan")
install_job = relationship("InstallJob",
order_by="asc(InstallJob.scheduled_time)",
backref="host",
cascade="all, delete, delete-orphan")
install_job_history = relationship("InstallJobHistory",
order_by="desc(InstallJobHistory.created_time)",
backref="host",
cascade="all, delete, delete-orphan")
UDIs = relationship("UDI",
order_by="asc(UDI.name)",
backref="host",
cascade="all, delete, delete-orphan")
def get_json(self):
result = {}
result['hostname'] = self.hostname
try:
if len(self.packages) > 0:
package_list_dict = {}
# loop through individual package
for index, package in enumerate(self.packages):
package_dict = {}
modules_package_state = package.modules_package_state
modules_package_state_dict = {}
if modules_package_state:
for module_package_state in modules_package_state:
modules_package_state_dict[module_package_state.module_name] = \
module_package_state.package_state
if len(modules_package_state_dict) > 0:
package_dict['modules'] = modules_package_state_dict
package_dict['state'] = package.state
package_dict['package'] = package.name
package_list_dict[index] = package_dict
result['packages'] = package_list_dict
except:
logger.exception('Host.get_json() hits exception')
return result
class HostContext(Base):
__tablename__ = 'host_context'
id = Column(Integer, primary_key=True)
data = Column(JSONEncodedDict, default={})
host_id = Column(Integer, ForeignKey('host.id'), unique=True)
modified_time = Column(DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow)
class ConnectionParam(Base):
__tablename__ = 'connection_param'
id = Column(Integer, primary_key=True)
# Multiple IPs can be specified using comma as the delimiter
host_or_ip = Column(String(100), nullable=False)
username = Column(String(50), nullable=False)
_password = Column('password', String(100), nullable=False)
connection_type = Column(String(10), nullable=False)
# Multiple Ports can be specified using comma as the delimiter
port_number = Column(String(100), default='')
host_id = Column(Integer, ForeignKey('host.id'))
jump_host_id = Column(Integer, ForeignKey('jump_host.id'))
jump_host = relationship("JumpHost", foreign_keys='ConnectionParam.jump_host_id')
@property
def password(self):
global encrypt_dict
return decode(encrypt_dict, self._password)
@password.setter
def password(self, value):
global encrypt_dict
self._password = encode(encrypt_dict, value)
class UDI(Base):
__tablename__ = 'udi'
id = Column(Integer, primary_key=True)
name = Column(String(50))
description = Column(String(100))
pid = Column(String(30))
vid = Column(String(10))
sn = Column(String(30))
host_id = Column(Integer, ForeignKey('host.id'))
class JumpHost(Base):
__tablename__ = 'jump_host'
id = Column(Integer, primary_key=True)
hostname = Column(String(100), nullable=False, index=True)
host_or_ip = Column(String(50), nullable=False)
username = Column(String(50), nullable=False)
_password = Column('password', String(100), nullable=False)
connection_type = Column(String(10), nullable=False)
port_number = Column(String(10), default='')
created_time = Column(DateTime, default=datetime.datetime.utcnow)
created_by = Column(String(50))
@property
def password(self):
global encrypt_dict
return decode(encrypt_dict, self._password)
@password.setter
def password(self, value):
global encrypt_dict
self._password = encode(encrypt_dict, value)
class InventoryJob(Base):
__tablename__ = 'inventory_job'
id = Column(Integer, primary_key=True)
pending_submit = Column(Boolean, default=True)
status = Column(String(200))
status_time = Column(DateTime)
last_successful_time = Column(DateTime)
session_log = Column(Text)
host_id = Column(Integer, ForeignKey('host.id'), unique=True)
host = relationship('Host', foreign_keys='InventoryJob.host_id')
def set_status(self, status):
self.status = status
self.status_time = datetime.datetime.utcnow()
if self.status == JobStatus.COMPLETED:
self.last_successful_time = self.status_time
class InventoryJobHistory(Base):
__tablename__ = 'inventory_job_history'
id = Column(Integer, primary_key=True)
status = Column(String(200))
status_time = Column(DateTime)
trace = Column(Text)
session_log = Column(Text)
created_time = Column(DateTime, default=datetime.datetime.utcnow)
host_id = Column(Integer, ForeignKey('host.id'))
def set_status(self, status):
self.status = status
self.status_time = datetime.datetime.utcnow()
class Package(Base):
__tablename__ = 'package'
id = Column(Integer, primary_key=True)
location = Column(String(20))
name = Column(String(100), nullable=False)
state = Column(String(20), nullable=False)
host_id = Column(Integer, ForeignKey('host.id'))
modules_package_state = relationship("ModulePackageState",
order_by="ModulePackageState.module_name",
backref="package",
cascade="all, delete, delete-orphan")
class ModulePackageState(Base):
__tablename__ = 'module_package_state'
id = Column(Integer, primary_key=True)
module_name = Column(String(20), nullable=False)
package_state = Column(String(20), nullable=False)
package_id = Column(Integer, ForeignKey('package.id'))
class InstallJob(Base):
__tablename__ = 'install_job'
id = Column(Integer, primary_key=True)
install_action = Column(String(50))
dependency = Column(Integer)
server_id = Column(Integer, ForeignKey('server.id'))
server_directory = Column(String(300))
packages = Column(Text)
pending_downloads = Column(Text)
scheduled_time = Column(DateTime)
start_time = Column(DateTime)
status = Column(String(200))
status_time = Column(DateTime)
trace = Column(Text)
session_log = Column(Text)
created_time = Column(DateTime, default=datetime.datetime.utcnow)
modified_time = Column(DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow)
created_by = Column(String(50))
host_id = Column(Integer, ForeignKey('host.id'))
user_id = Column(Integer, ForeignKey('user.id'))
custom_command_profile_id = Column(String(20))
def set_status(self, status):
self.status = status
self.status_time = datetime.datetime.utcnow()
class InstallJobHistory(Base):
__tablename__ = 'install_job_history'
id = Column(Integer, primary_key=True)
install_action = Column(String(50))
dependency = Column(Integer)
packages = Column(Text)
scheduled_time = Column(DateTime)
start_time = Column(DateTime)
status = Column(String(200))
status_time = Column(DateTime)
operation_id = Column(Integer, default=-1)
trace = Column(Text)
install_job_id = Column(Integer, index=True, unique=False)
session_log = Column(Text)
created_time = Column(DateTime, default=datetime.datetime.utcnow)
created_by = Column(String(50))
host_id = Column(Integer, ForeignKey('host.id'))
def set_status(self, status):
self.status = status
self.status_time = datetime.datetime.utcnow()
class Region(Base):
__tablename__ = 'region'
id = Column(Integer, primary_key=True)
name = Column(String(100), index=True)
created_time = Column(DateTime, default=datetime.datetime.utcnow)
created_by = Column(String(50))
servers = relationship('Server', order_by="Server.hostname", secondary=lambda: RegionServer)
class Server(Base):
__tablename__ = 'server'
id = Column(Integer, primary_key=True)
hostname = Column(String(100), index=True)
server_type = Column(String(20))
server_url = Column(String(100))
vrf = Column(String(100))
username = Column(String(100))
_password = Column('password', String(100))
server_directory = Column(String(100))
created_time = Column(DateTime, default=datetime.datetime.utcnow)
created_by = Column(String(50))
regions = relationship('Region', order_by="Region.name", secondary=lambda: RegionServer)
@property
def password(self):
global encrypt_dict
return decode(encrypt_dict, self._password)
@password.setter
def password(self, value):
global encrypt_dict
self._password = encode(encrypt_dict, value)
class SMTPServer(Base):
__tablename__ = 'smtp_server'
id = Column(Integer, primary_key=True)
server = Column(String(50))
server_port = Column(String(10))
sender = Column(String(50))
use_authentication = Column(Boolean, default=False)
username = Column(String(50))
_password = Column('password', String(100))
secure_connection = Column(String(10))
@property
def password(self):
global encrypt_dict
return decode(encrypt_dict, self._password)
@password.setter
def password(self, value):
global encrypt_dict
self._password = encode(encrypt_dict, value)
RegionServer = Table('region_server', Base.metadata,
Column('region_id', Integer, ForeignKey("region.id"), primary_key=True),
Column('server_id', Integer, ForeignKey("server.id"), primary_key=True))
class Preferences(Base):
__tablename__ = 'preferences'
id = Column(Integer, primary_key=True)
excluded_platforms_and_releases = Column(Text)
cco_username = Column(String(50))
_cco_password = Column('cco_password', String(100))
user_id = Column(Integer, ForeignKey('user.id'))
@property
def cco_password(self):
global encrypt_dict
return decode(encrypt_dict, self._cco_password)
@cco_password.setter
def cco_password(self, value):
global encrypt_dict
self._cco_password = encode(encrypt_dict, value)
@classmethod
def get(cls, db_session, user_id):
return db_session.query(Preferences).filter(Preferences.user_id == user_id).first()
class DownloadJob(Base):
__tablename__ = 'download_job'
id = Column(Integer, primary_key=True)
cco_filename = Column(String(50))
scheduled_time = Column(DateTime, default=datetime.datetime.utcnow)
pid = Column(String(200))
mdf_id = Column(String(200))
software_type_id = Column(String(20))
server_id = Column(Integer)
server_directory = Column(String(300))
status = Column(String(200))
status_time = Column(DateTime)
trace = Column(Text)
session_log = Column(Text)
created_time = Column(DateTime, default=datetime.datetime.utcnow)
created_by = Column(String(50))
user_id = Column(Integer, ForeignKey('user.id'))
def set_status(self, status):
self.status = status
self.status_time = datetime.datetime.utcnow()
class DownloadJobHistory(Base):
__tablename__ = 'download_job_history'
id = Column(Integer, primary_key=True)
cco_filename = Column(String(50))
scheduled_time = Column(DateTime)
pid = Column(String(200))
mdf_id = Column(String(200))
software_type_id = Column(String(20))
server_id = Column(Integer)
server_directory = Column(String(300))
status = Column(String(200))
status_time = Column(DateTime)
trace = Column(Text)
session_log = Column(Text)
created_time = Column(DateTime, default=datetime.datetime.utcnow)
created_by = Column(String(50))
user_id = Column(Integer, ForeignKey('user.id'))
def set_status(self, status):
self.status = status
self.status_time = datetime.datetime.utcnow()
class CCOCatalog(Base):
__tablename__ = 'cco_catalog'
platform = Column(String(40), primary_key=True)
release = Column(String(40), primary_key=True)
class SMUMeta(Base):
__tablename__ = 'smu_meta'
# name is like asr9k_px_4.2.3
platform_release = Column(String(40), primary_key=True)
created_time = Column(String(30)) # Use string instead of timestamp
smu_software_type_id = Column(String(20))
sp_software_type_id = Column(String(20))
tar_software_type_id = Column(String(20))
file_suffix = Column(String(10))
pid = Column(String(200))
mdf_id = Column(String(200))
retrieval_time = Column(DateTime)
smu_info = relationship("SMUInfo",
backref="smu_meta",
cascade="all, delete, delete-orphan")
class SMUInfo(Base):
__tablename__ = 'smu_info'
id = Column(String(100), primary_key=True)
name = Column(String(50))
status = Column(String(20))
type = Column(String(20)) # Recommended, Optional, PSIRT
package_type = Column(String(20))
posted_date = Column(String(30))
eta_date = Column(String(30))
ddts = Column(String(20))
description = Column(Text)
impact = Column(String(50))
_cco_filename = Column("cco_filename", String(50))
functional_areas = Column(Text)
package_bundles = Column(Text)
composite_DDTS = Column(Text)
compressed_image_size = Column(String(20))
uncompressed_image_size = Column(String(20))
supersedes = Column(Text)
superseded_by = Column(Text)
prerequisites = Column(Text)
prerequisite_to = Column(Text)
platform_release = Column(String(40), ForeignKey('smu_meta.platform_release'))
@property
def cco_filename(self):
        # PIMS sometimes does not fill in the cco_filename; fall back to the SMU name + '.tar'.
if is_empty(self._cco_filename):
return self.name + '.tar'
return self._cco_filename
@cco_filename.setter
def cco_filename(self, value):
self._cco_filename = value
class SystemVersion(Base):
__tablename__ = 'system_version'
id = Column(Integer, primary_key=True)
schema_version = Column(Integer, default=CURRENT_SCHEMA_VERSION)
software_version = Column(String(10), default='1.0')
@classmethod
def get(cls, db_session):
return db_session.query(SystemVersion).first()
class DeviceUDI(Base):
__tablename__ = 'device_udi'
id = Column(Integer, primary_key=True)
platform = Column(String(50))
pid = Column(String(50))
version = Column(String(50))
serial_number = Column(String(50))
@classmethod
def get(cls, db_session):
return db_session.query(DeviceUDI).first()
class SystemOption(Base):
__tablename__ = 'system_option'
id = Column(Integer, primary_key=True)
inventory_threads = Column(Integer, default=5)
install_threads = Column(Integer, default=10)
download_threads = Column(Integer, default=5)
can_schedule = Column(Boolean, default=True)
can_install = Column(Boolean, default=True)
enable_email_notify = Column(Boolean, default=False)
enable_inventory = Column(Boolean, default=True)
inventory_hour = Column(Integer, default=0)
inventory_history_per_host = Column(Integer, default=10)
download_history_per_user = Column(Integer, default=100)
install_history_per_host = Column(Integer, default=100)
total_system_logs = Column(Integer, default=2000)
enable_default_host_authentication = Column(Boolean, default=False)
default_host_username = Column(String(50))
_default_host_password = Column('default_host_password', String(100))
default_host_authentication_choice = Column(String(10), default="1")
base_url = Column(String(100))
enable_ldap_auth = Column(Boolean, default=False)
enable_ldap_host_auth = Column(Boolean, default=False)
ldap_server_url = Column(String(100))
enable_cco_lookup = Column(Boolean, default=True)
cco_lookup_time = Column(DateTime)
enable_user_credential_for_host = Column(Boolean, default=False)
@property
def default_host_password(self):
global encrypt_dict
return decode(encrypt_dict, self._default_host_password)
@default_host_password.setter
def default_host_password(self, value):
global encrypt_dict
self._default_host_password = encode(encrypt_dict, value)
@classmethod
def get(cls, db_session):
return db_session.query(SystemOption).first()
class Encrypt(Base):
__tablename__ = 'encrypt'
id = Column(Integer, primary_key=True)
key = Column(String(30), default=datetime.datetime.utcnow().strftime("%m/%d/%Y %I:%M %p"))
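    # Note: this default is evaluated once, when the class is defined, so every
    # Encrypt row created during a single server run gets the same key string.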
string1 = Column(String(100), default=STRING1)
string2 = Column(String(100), default=STRING2)
@classmethod
def get(cls, db_session):
return db_session.query(Encrypt).first()
class Log(Base):
__tablename__ = 'log'
id = Column(Integer, primary_key=True)
level = Column(String(20))
trace = Column(Text)
msg = Column(Text)
created_time = Column(DateTime)
class CSMMessage(Base):
__tablename__ = 'csm_message'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('user.id'))
acknowledgment_date = Column(DateTime)
class SoftwareProfile(Base):
__tablename__ = 'software_profile'
id = Column(Integer, primary_key=True)
name = Column(String(100))
description = Column(Text)
packages = Column(Text)
created_by = Column(String(50))
class ConformanceReport(Base):
__tablename__ = 'conformance_report'
id = Column(Integer, primary_key=True)
software_profile = Column(String(100))
software_profile_packages = Column(Text)
match_criteria = Column(String(30))
hostnames = Column(Text)
host_not_in_conformance = Column(Integer, default=0)
host_out_dated_inventory = Column(Integer, default=0)
created_time = Column(DateTime, default=datetime.datetime.utcnow)
created_by = Column(String(50))
user_id = Column(Integer, ForeignKey('user.id'))
entries = relationship("ConformanceReportEntry",
order_by="ConformanceReportEntry.hostname",
backref="conformance_report",
cascade="all, delete, delete-orphan")
class ConformanceReportEntry(Base):
__tablename__ = 'conformance_report_entry'
id = Column(Integer, primary_key=True)
hostname = Column(String(50))
platform = Column(String(20))
software = Column(String(20))
host_packages = Column(Text)
missing_packages = Column(Text)
conformed = Column(String(3))
comments = Column(String(50))
conformance_report_id = Column(Integer, ForeignKey('conformance_report.id'))
class EmailJob(Base):
__tablename__ = 'email_job'
id = Column(Integer, primary_key=True)
recipients = Column(String(200))
message = Column(Text)
scheduled_time = Column(DateTime, default=datetime.datetime.utcnow)
status = Column(String(200))
status_time = Column(DateTime)
created_by = Column(String(50))
def set_status(self, status):
self.status = status
self.status_time = datetime.datetime.utcnow()
class CreateTarJob(Base):
__tablename__ = 'create_tar_job'
id = Column(Integer, primary_key=True)
server_id = Column(Integer)
server_directory = Column(String(300))
source_tars = Column(Text)
contents = Column(Text)
additional_packages = Column(Text)
new_tar_name = Column(String(50))
status = Column(String(200))
status_time = Column(DateTime)
created_by = Column(String(50))
def set_status(self, status):
self.status = status
self.status_time = datetime.datetime.utcnow()
class CustomCommandProfile(Base):
__tablename__ = 'custom_command_profile'
id = Column(Integer, primary_key=True)
profile_name = Column(String(50))
command_list = Column(Text)
created_by = Column(String(50))
Base.metadata.create_all(engine)
class LogHandler(logging.Handler):
def __init__(self, db_session):
logging.Handler.__init__(self)
self.db_session = db_session
def emit(self, record):
        trace = traceback.format_exc() if record.exc_info else None
        # record.getMessage() applies any %-style arguments to the log message.
        msg = record.getMessage()
        log = Log(
            level=record.levelname,
            trace=trace,
            msg=msg,
            created_time=datetime.datetime.utcnow())
self.db_session.add(log)
self.db_session.commit()
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logger.addHandler(LogHandler(DBSession()))
def get_db_session_logger(db_session):
"""
Return a session specific logger. This is necessary especially
if the db_session is from a different process address space.
"""
session_logger = logging.getLogger('session_logger')
session_logger.setLevel(logging.DEBUG)
session_logger.addHandler(LogHandler(db_session))
return session_logger
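# Usage sketch (an assumption, not from the original source): a worker process
# that only has a db_session can still persist log records to the Log table:
#
#     session_logger = get_db_session_logger(db_session)
#     session_logger.info('inventory job started')
#
# Note that each call attaches another LogHandler to the shared
# 'session_logger', so calling it repeatedly in one process duplicates rows.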
def get_download_job_key_dict():
result = {}
db_session = DBSession()
download_jobs = db_session.query(DownloadJob).all()
for download_job in download_jobs:
        download_job_key = "{}{}{}{}".format(download_job.user_id, download_job.cco_filename,
                                             download_job.server_id, download_job.server_directory)
result[download_job_key] = download_job
return result
def init_system_version():
db_session = DBSession()
if db_session.query(SystemVersion).count() == 0:
db_session.add(SystemVersion())
db_session.commit()
def create_user(db_session, username, password, privilege, fullname, email):
user = User(
username=username,
password=password,
privilege=privilege,
fullname=fullname,
email=email)
user.preferences.append(Preferences())
db_session.add(user)
db_session.commit()
return user
def init_user():
db_session = DBSession()
    # Set up a default admin account ('root'/'root') if no user exists yet
if db_session.query(User).count() == 0:
create_user(db_session, 'root', 'root', UserPrivilege.ADMIN, 'admin', 'admin')
def init_system_option():
db_session = DBSession()
if db_session.query(SystemOption).count() == 0:
db_session.add(SystemOption())
db_session.commit()
def init_encrypt():
global encrypt_dict
db_session = DBSession()
if db_session.query(Encrypt).count() == 0:
db_session.add(Encrypt())
db_session.commit()
encrypt_dict = dict(Encrypt.get(db_session).__dict__)
def initialize():
init_user()
init_system_option()
init_system_version()
init_encrypt()
if __name__ == '__main__':
pass
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""NASNet-A models for Keras.
NASNet refers to Neural Architecture Search Network, a family of models
that were designed automatically by learning the model architectures
directly on the dataset of interest.
Here we consider NASNet-A, the highest performance model that was found
for the CIFAR-10 dataset, and then extended to ImageNet 2012 dataset,
obtaining state of the art performance on CIFAR-10 and ImageNet 2012.
Only the NASNet-A models, and their respective weights, which are suited
for ImageNet 2012 are provided.
The below table describes the performance on ImageNet 2012:
--------------------------------------------------------------------------------
Architecture | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M)
--------------------------------------------------------------------------------
| NASNet-A (4 @ 1056) | 74.0 % | 91.6 % | 564 M | 5.3 |
| NASNet-A (6 @ 4032) | 82.7 % | 96.2 % | 23.8 B | 88.9 |
--------------------------------------------------------------------------------
Reference paper:
- [Learning Transferable Architectures for Scalable Image Recognition]
(https://arxiv.org/abs/1707.07012) (CVPR 2018)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/nasnet/')
NASNET_MOBILE_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'NASNet-mobile.h5'
NASNET_MOBILE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'NASNet-mobile-no-top.h5'
NASNET_LARGE_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'NASNet-large.h5'
NASNET_LARGE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'NASNet-large-no-top.h5'
layers = VersionAwareLayers()
def NASNet(
input_shape=None,
penultimate_filters=4032,
num_blocks=6,
stem_block_filters=96,
skip_reduction=True,
filter_multiplier=2,
include_top=True,
weights=None,
input_tensor=None,
pooling=None,
classes=1000,
default_size=None,
classifier_activation='softmax'):
"""Instantiates a NASNet model.
Reference paper:
- [Learning Transferable Architectures for Scalable Image Recognition]
(https://arxiv.org/abs/1707.07012) (CVPR 2018)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Caution: Be sure to properly pre-process your inputs to the application.
Please see `applications.nasnet.preprocess_input` for an example.
Arguments:
input_shape: Optional shape tuple, the input shape
is by default `(331, 331, 3)` for NASNetLarge and
`(224, 224, 3)` for NASNetMobile.
It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
penultimate_filters: Number of filters in the penultimate layer.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
num_blocks: Number of repeated blocks of the NASNet model.
NASNet models use the notation `NASNet (N @ P)`, where:
- N is the number of blocks
- P is the number of penultimate filters
stem_block_filters: Number of filters in the initial stem block
skip_reduction: Whether to skip the reduction step at the tail
end of the network.
filter_multiplier: Controls the width of the network.
- If `filter_multiplier` < 1.0, proportionally decreases the number
of filters in each layer.
- If `filter_multiplier` > 1.0, proportionally increases the number
of filters in each layer.
- If `filter_multiplier` = 1, default number of filters from the
paper are used at each layer.
include_top: Whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
default_size: Specifies the default image size of the model
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
Returns:
A `keras.Model` instance.
Raises:
ValueError: In case of invalid argument for `weights`,
invalid input shape or invalid `penultimate_filters` value.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
if (isinstance(input_shape, tuple) and None in input_shape and
weights == 'imagenet'):
raise ValueError('When specifying the input shape of a NASNet'
' and loading `ImageNet` weights, '
'the input_shape argument must be static '
'(no None entries). Got: `input_shape=' +
str(input_shape) + '`.')
if default_size is None:
default_size = 331
# Determine proper input shape and default size.
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=True,
weights=weights)
if backend.image_data_format() != 'channels_last':
logging.warning('The NASNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
backend.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if penultimate_filters % (24 * (filter_multiplier**2)) != 0:
raise ValueError(
'For NASNet-A models, the `penultimate_filters` must be a multiple '
'of 24 * (`filter_multiplier` ** 2). Current value: %d' %
penultimate_filters)
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
filters = penultimate_filters // 24
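  # Worked example (illustrative): for NASNetLarge, penultimate_filters=4032 and
  # filter_multiplier=2, so 4032 % (24 * 4) == 0 and filters = 4032 // 24 = 168.
  # The last group of normal cells then uses 168 * 2**2 = 672 filters per branch,
  # and concatenating the 6 branches yields 6 * 672 = 4032 penultimate channels.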
x = layers.Conv2D(
stem_block_filters, (3, 3),
strides=(2, 2),
padding='valid',
use_bias=False,
name='stem_conv1',
kernel_initializer='he_normal')(
img_input)
x = layers.BatchNormalization(
axis=channel_dim, momentum=0.9997, epsilon=1e-3, name='stem_bn1')(
x)
p = None
x, p = _reduction_a_cell(
x, p, filters // (filter_multiplier**2), block_id='stem_1')
x, p = _reduction_a_cell(
x, p, filters // filter_multiplier, block_id='stem_2')
for i in range(num_blocks):
x, p = _normal_a_cell(x, p, filters, block_id='%d' % (i))
x, p0 = _reduction_a_cell(
x, p, filters * filter_multiplier, block_id='reduce_%d' % (num_blocks))
p = p0 if not skip_reduction else p
for i in range(num_blocks):
x, p = _normal_a_cell(
x, p, filters * filter_multiplier, block_id='%d' % (num_blocks + i + 1))
x, p0 = _reduction_a_cell(
x,
p,
filters * filter_multiplier**2,
block_id='reduce_%d' % (2 * num_blocks))
p = p0 if not skip_reduction else p
for i in range(num_blocks):
x, p = _normal_a_cell(
x,
p,
filters * filter_multiplier**2,
block_id='%d' % (2 * num_blocks + i + 1))
x = layers.Activation('relu')(x)
if include_top:
x = layers.GlobalAveragePooling2D()(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Dense(classes, activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
model = training.Model(inputs, x, name='NASNet')
# Load weights.
if weights == 'imagenet':
if default_size == 224: # mobile version
if include_top:
weights_path = data_utils.get_file(
'nasnet_mobile.h5',
NASNET_MOBILE_WEIGHT_PATH,
cache_subdir='models',
file_hash='020fb642bf7360b370c678b08e0adf61')
else:
weights_path = data_utils.get_file(
'nasnet_mobile_no_top.h5',
NASNET_MOBILE_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='1ed92395b5b598bdda52abe5c0dbfd63')
model.load_weights(weights_path)
elif default_size == 331: # large version
if include_top:
weights_path = data_utils.get_file(
'nasnet_large.h5',
NASNET_LARGE_WEIGHT_PATH,
cache_subdir='models',
file_hash='11577c9a518f0070763c2b964a382f17')
else:
weights_path = data_utils.get_file(
'nasnet_large_no_top.h5',
NASNET_LARGE_WEIGHT_PATH_NO_TOP,
cache_subdir='models',
file_hash='d81d89dc07e6e56530c4e77faddd61b5')
model.load_weights(weights_path)
else:
raise ValueError('ImageNet weights can only be loaded with NASNetLarge'
' or NASNetMobile')
elif weights is not None:
model.load_weights(weights)
if old_data_format:
backend.set_image_data_format(old_data_format)
return model
@keras_export('keras.applications.nasnet.NASNetMobile',
'keras.applications.NASNetMobile')
def NASNetMobile(input_shape=None,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000):
"""Instantiates a Mobile NASNet model in ImageNet mode.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Arguments:
input_shape: Optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(224, 224, 3)` for NASNetMobile).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
include_top: Whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: In case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
return NASNet(
input_shape,
penultimate_filters=1056,
num_blocks=4,
stem_block_filters=32,
skip_reduction=False,
filter_multiplier=2,
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
pooling=pooling,
classes=classes,
default_size=224)
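# Hypothetical usage sketch (not part of the original module): classifying one
# image with NASNetMobile; the file name is illustrative only.
#
#     import numpy as np
#     from tensorflow.keras.preprocessing import image
#     model = NASNetMobile(weights='imagenet')
#     img = image.load_img('elephant.jpg', target_size=(224, 224))
#     x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
#     print(decode_predictions(model.predict(x), top=3))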
@keras_export('keras.applications.nasnet.NASNetLarge',
'keras.applications.NASNetLarge')
def NASNetLarge(input_shape=None,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000):
"""Instantiates a NASNet model in ImageNet mode.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
Arguments:
input_shape: Optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(331, 331, 3)` for NASNetLarge).
      It should have exactly 3 input channels,
and width and height should be no smaller than 32.
E.g. `(224, 224, 3)` would be one valid value.
include_top: Whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: Optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: Optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
return NASNet(
input_shape,
penultimate_filters=4032,
num_blocks=6,
stem_block_filters=96,
skip_reduction=True,
filter_multiplier=2,
include_top=include_top,
weights=weights,
input_tensor=input_tensor,
pooling=pooling,
classes=classes,
default_size=331)
def _separable_conv_block(ip,
filters,
kernel_size=(3, 3),
strides=(1, 1),
block_id=None):
"""Adds 2 blocks of [relu-separable conv-batchnorm].
Arguments:
ip: Input tensor
filters: Number of output filters per layer
kernel_size: Kernel size of separable convolutions
strides: Strided convolution for downsampling
block_id: String block_id
Returns:
A Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
with backend.name_scope('separable_conv_block_%s' % block_id):
x = layers.Activation('relu')(ip)
if strides == (2, 2):
x = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(x, kernel_size),
name='separable_conv_1_pad_%s' % block_id)(x)
conv_pad = 'valid'
else:
conv_pad = 'same'
x = layers.SeparableConv2D(
filters,
kernel_size,
strides=strides,
name='separable_conv_1_%s' % block_id,
padding=conv_pad,
use_bias=False,
kernel_initializer='he_normal')(
x)
x = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='separable_conv_1_bn_%s' % (block_id))(
x)
x = layers.Activation('relu')(x)
x = layers.SeparableConv2D(
filters,
kernel_size,
name='separable_conv_2_%s' % block_id,
padding='same',
use_bias=False,
kernel_initializer='he_normal')(
x)
x = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='separable_conv_2_bn_%s' % (block_id))(
x)
return x
def _adjust_block(p, ip, filters, block_id=None):
"""Adjusts the input `previous path` to match the shape of the `input`.
Used in situations where the output number of filters needs to be changed.
Arguments:
p: Input tensor which needs to be modified
ip: Input tensor whose shape needs to be matched
filters: Number of output filters to be matched
block_id: String block_id
Returns:
Adjusted Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
img_dim = 2 if backend.image_data_format() == 'channels_first' else -2
ip_shape = backend.int_shape(ip)
if p is not None:
p_shape = backend.int_shape(p)
with backend.name_scope('adjust_block'):
if p is None:
p = ip
elif p_shape[img_dim] != ip_shape[img_dim]:
with backend.name_scope('adjust_reduction_block_%s' % block_id):
p = layers.Activation('relu', name='adjust_relu_1_%s' % block_id)(p)
p1 = layers.AveragePooling2D((1, 1),
strides=(2, 2),
padding='valid',
name='adjust_avg_pool_1_%s' % block_id)(
p)
p1 = layers.Conv2D(
filters // 2, (1, 1),
padding='same',
use_bias=False,
name='adjust_conv_1_%s' % block_id,
kernel_initializer='he_normal')(
p1)
p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
p2 = layers.AveragePooling2D((1, 1),
strides=(2, 2),
padding='valid',
name='adjust_avg_pool_2_%s' % block_id)(
p2)
p2 = layers.Conv2D(
filters // 2, (1, 1),
padding='same',
use_bias=False,
name='adjust_conv_2_%s' % block_id,
kernel_initializer='he_normal')(
p2)
p = layers.concatenate([p1, p2], axis=channel_dim)
p = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='adjust_bn_%s' % block_id)(
p)
elif p_shape[channel_dim] != filters:
with backend.name_scope('adjust_projection_block_%s' % block_id):
p = layers.Activation('relu')(p)
p = layers.Conv2D(
filters, (1, 1),
strides=(1, 1),
padding='same',
name='adjust_conv_projection_%s' % block_id,
use_bias=False,
kernel_initializer='he_normal')(
p)
p = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='adjust_bn_%s' % block_id)(
p)
return p
def _normal_a_cell(ip, p, filters, block_id=None):
"""Adds a Normal cell for NASNet-A (Fig. 4 in the paper).
Arguments:
ip: Input tensor `x`
p: Input tensor `p`
filters: Number of output filters
block_id: String block_id
Returns:
A Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
with backend.name_scope('normal_A_block_%s' % block_id):
p = _adjust_block(p, ip, filters, block_id)
h = layers.Activation('relu')(ip)
h = layers.Conv2D(
filters, (1, 1),
strides=(1, 1),
padding='same',
name='normal_conv_1_%s' % block_id,
use_bias=False,
kernel_initializer='he_normal')(
h)
h = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='normal_bn_1_%s' % block_id)(
h)
with backend.name_scope('block_1'):
x1_1 = _separable_conv_block(
h, filters, kernel_size=(5, 5), block_id='normal_left1_%s' % block_id)
x1_2 = _separable_conv_block(
p, filters, block_id='normal_right1_%s' % block_id)
x1 = layers.add([x1_1, x1_2], name='normal_add_1_%s' % block_id)
with backend.name_scope('block_2'):
x2_1 = _separable_conv_block(
p, filters, (5, 5), block_id='normal_left2_%s' % block_id)
x2_2 = _separable_conv_block(
p, filters, (3, 3), block_id='normal_right2_%s' % block_id)
x2 = layers.add([x2_1, x2_2], name='normal_add_2_%s' % block_id)
with backend.name_scope('block_3'):
x3 = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same',
name='normal_left3_%s' % (block_id))(
h)
x3 = layers.add([x3, p], name='normal_add_3_%s' % block_id)
with backend.name_scope('block_4'):
x4_1 = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same',
name='normal_left4_%s' % (block_id))(
p)
x4_2 = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same',
name='normal_right4_%s' % (block_id))(
p)
x4 = layers.add([x4_1, x4_2], name='normal_add_4_%s' % block_id)
with backend.name_scope('block_5'):
x5 = _separable_conv_block(
h, filters, block_id='normal_left5_%s' % block_id)
x5 = layers.add([x5, h], name='normal_add_5_%s' % block_id)
x = layers.concatenate([p, x1, x2, x3, x4, x5],
axis=channel_dim,
name='normal_concat_%s' % block_id)
return x, ip
def _reduction_a_cell(ip, p, filters, block_id=None):
"""Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).
Arguments:
ip: Input tensor `x`
p: Input tensor `p`
filters: Number of output filters
block_id: String block_id
Returns:
A Keras tensor
"""
channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
with backend.name_scope('reduction_A_block_%s' % block_id):
p = _adjust_block(p, ip, filters, block_id)
h = layers.Activation('relu')(ip)
h = layers.Conv2D(
filters, (1, 1),
strides=(1, 1),
padding='same',
name='reduction_conv_1_%s' % block_id,
use_bias=False,
kernel_initializer='he_normal')(
h)
h = layers.BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='reduction_bn_1_%s' % block_id)(
h)
h3 = layers.ZeroPadding2D(
padding=imagenet_utils.correct_pad(h, 3),
name='reduction_pad_1_%s' % block_id)(
h)
with backend.name_scope('block_1'):
x1_1 = _separable_conv_block(
h,
filters, (5, 5),
strides=(2, 2),
block_id='reduction_left1_%s' % block_id)
x1_2 = _separable_conv_block(
p,
filters, (7, 7),
strides=(2, 2),
block_id='reduction_right1_%s' % block_id)
x1 = layers.add([x1_1, x1_2], name='reduction_add_1_%s' % block_id)
with backend.name_scope('block_2'):
x2_1 = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='valid',
name='reduction_left2_%s' % block_id)(
h3)
x2_2 = _separable_conv_block(
p,
filters, (7, 7),
strides=(2, 2),
block_id='reduction_right2_%s' % block_id)
x2 = layers.add([x2_1, x2_2], name='reduction_add_2_%s' % block_id)
with backend.name_scope('block_3'):
x3_1 = layers.AveragePooling2D((3, 3),
strides=(2, 2),
padding='valid',
name='reduction_left3_%s' % block_id)(
h3)
x3_2 = _separable_conv_block(
p,
filters, (5, 5),
strides=(2, 2),
block_id='reduction_right3_%s' % block_id)
x3 = layers.add([x3_1, x3_2], name='reduction_add3_%s' % block_id)
with backend.name_scope('block_4'):
x4 = layers.AveragePooling2D((3, 3),
strides=(1, 1),
padding='same',
name='reduction_left4_%s' % block_id)(
x1)
x4 = layers.add([x2, x4])
with backend.name_scope('block_5'):
x5_1 = _separable_conv_block(
x1, filters, (3, 3), block_id='reduction_left4_%s' % block_id)
x5_2 = layers.MaxPooling2D((3, 3),
strides=(2, 2),
padding='valid',
name='reduction_right5_%s' % block_id)(
h3)
x5 = layers.add([x5_1, x5_2], name='reduction_add4_%s' % block_id)
x = layers.concatenate([x2, x3, x4, x5],
axis=channel_dim,
name='reduction_concat_%s' % block_id)
return x, ip
@keras_export('keras.applications.nasnet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.nasnet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='', ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
|
|
#! /usr/bin/env python
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: [email protected]
#
# pylint: disable=invalid-name,missing-docstring
"""
Eliminate surplus reads.
Eliminate reads with median k-mer abundance higher than
DESIRED_COVERAGE. Output sequences will be placed in 'infile.keep', with the
option to output to STDOUT.
% python scripts/normalize-by-median.py [ -C <cutoff> ] <data1> <data2> ...
Use '-h' for parameter help.
"""
from __future__ import print_function
import sys
import screed
import os
import khmer
import textwrap
from khmer import khmer_args
from contextlib import contextmanager
from khmer.khmer_args import (build_counting_args, add_loadhash_args,
report_on_config, info)
import argparse
from khmer.kfile import (check_space, check_space_for_hashtable,
check_valid_file_exists)
from khmer.utils import write_record, check_is_pair, broken_paired_reader
DEFAULT_DESIRED_COVERAGE = 20
def WithDiagnostics(ifilename, norm, reader, fp):
"""
Generator/context manager to do boilerplate output of statistics using a
Normalizer object.
"""
index = 0
# per read diagnostic output
for index, record in enumerate(norm(reader)):
if norm.total % 100000 == 0:
print('... kept {kept} of {total} or {perc:2}% so far'
.format(kept=norm.total - norm.discarded,
total=norm.total,
perc=int(100. - norm.discarded /
float(norm.total) * 100.)),
file=sys.stderr)
print('... in file ' + ifilename, file=sys.stderr)
yield record
# per file diagnostic output
if norm.total == 0:
print('SKIPPED empty file ' + ifilename, file=sys.stderr)
else:
print('DONE with {inp}; kept {kept} of {total} or {perc:2}%'
.format(inp=ifilename, kept=norm.total - norm.discarded,
total=norm.total, perc=int(100. - norm.discarded /
float(norm.total) * 100.)),
file=sys.stderr)
if fp:
print("{total} {kept} {discarded}"
.format(total=norm.total, kept=norm.total - norm.discarded,
discarded=1. - (norm.discarded / float(norm.total))),
file=fp)
fp.flush()
class Normalizer(object):
"""
Digital normalization algorithm.
"""
def __init__(self, desired_coverage, htable):
self.htable = htable
self.desired_coverage = desired_coverage
self.total = 0
self.discarded = 0
def __call__(self, reader):
"""
Actually does digital normalization - the core algorithm.
* get one (unpaired) or two (paired) reads;
* sanitize the sequences (convert Ns to As);
* get the median k-mer count of one/both reads;
* if any read's median k-mer count is below desired coverage, keep all;
* consume and yield kept reads.
"""
desired_coverage = self.desired_coverage
for index, is_paired, read0, read1 in reader:
passed_filter = False
self.total += 1
if is_paired:
self.total += 1
batch = []
batch.append(read0)
if read1 is not None:
batch.append(read1)
for record in batch:
seq = record.sequence.replace('N', 'A')
if not self.htable.median_at_least(seq, desired_coverage):
passed_filter = True
if passed_filter:
for record in batch:
seq = record.sequence.replace('N', 'A')
self.htable.consume(seq)
yield record
else:
self.discarded += len(batch)
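# Minimal driving sketch (an assumption, not from the original script; the file
# names are illustrative): a Normalizer filters a broken_paired_reader stream
# against a counting table, yielding only the reads that should be kept.
#
#     htable = khmer_args.create_countgraph(args)   # as in main() below
#     norm = Normalizer(args.cutoff, htable)
#     with open('reads.keep', 'w') as outfp:
#         for record in norm(broken_paired_reader(screed.open('reads.fq'))):
#             write_record(record, outfp)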
@contextmanager
def CatchIOErrors(ifile, out, single_out, force, corrupt_files):
"""
Context manager to do boilerplate handling of IOErrors.
"""
try:
yield
except (IOError, ValueError) as error:
print('** ERROR: ' + str(error), file=sys.stderr)
print('** Failed on {name}: '.format(name=ifile), file=sys.stderr)
if not single_out:
os.remove(out.name)
if not force:
print('** Exiting!', file=sys.stderr)
sys.exit(1)
else:
print('*** Skipping error file, moving on...', file=sys.stderr)
corrupt_files.append(ifile)
def get_parser():
epilog = ("""
Discard sequences based on whether or not their median k-mer abundance lies
above a specified cutoff. Kept sequences will be placed in <fileN>.keep.
By default, paired end reads will be considered together; if
either read should be kept, both will be kept. (This keeps both
reads from a fragment, and helps with retention of repeats.)
Unpaired reads are treated individually.
    If :option:`-p`/:option:`--paired` is set, then proper pairing is required
and the script will exit on unpaired reads, although
:option:`--unpaired-reads` can be used to supply a file of orphan
reads to be read after the paired reads.
:option:`--force-single` will ignore all pairing information and treat
reads individually.
With :option:`-s`/:option:`--savetable`, the k-mer counting table
will be saved to the specified file after all sequences have been
processed. With :option:`-d`, the k-mer counting table will be
saved every d files for multifile runs; if :option:`-s` is set,
the specified name will be used, and if not, the name `backup.ct`
will be used. :option:`-l`/:option:`--loadtable` will load the
specified k-mer counting table before processing the specified
    files. Note that these tables are in the same format as those
produced by :program:`load-into-counting.py` and consumed by
:program:`abundance-dist.py`.
To append reads to an output file (rather than overwriting it), send output
to STDOUT with `--out -` and use UNIX file redirection syntax (`>>`) to
append to the file.
Example::
normalize-by-median.py -k 17 tests/test-data/test-abund-read-2.fa
Example::
""" " normalize-by-median.py -p -k 17 tests/test-data/test-abund-read-paired.fa" # noqa
"""
Example::
""" " normalize-by-median.py -p -k 17 -o - tests/test-data/paired.fq >> appended-output.fq" # noqa
"""
Example::
""" " normalize-by-median.py -k 17 -f tests/test-data/test-error-reads.fq tests/test-data/test-fastq-reads.fq" # noqa
"""
Example::
""" " normalize-by-median.py -k 17 -d 2 -s test.ct tests/test-data/test-abund-read-2.fa tests/test-data/test-fastq-reads") # noqa
parser = build_counting_args(
descr="Do digital normalization (remove mostly redundant sequences)",
epilog=textwrap.dedent(epilog))
parser.add_argument('-C', '--cutoff', type=int,
default=DEFAULT_DESIRED_COVERAGE)
parser.add_argument('-p', '--paired', action='store_true',
help='require that all sequences be properly paired')
parser.add_argument('--force-single', dest='force_single',
action='store_true',
help='treat all sequences as single-ended/unpaired')
parser.add_argument('-u', '--unpaired-reads',
metavar="unpaired_reads_filename",
help='include a file of unpaired reads to which '
'-p/--paired does not apply.')
parser.add_argument('-s', '--savetable', metavar="filename", default='',
                        help='save the k-mer counting table to disk after all '
                        'reads are loaded.')
parser.add_argument('-R', '--report',
metavar='filename', type=argparse.FileType('w'))
parser.add_argument('-f', '--force', dest='force',
help='continue on next file if read errors are \
encountered', action='store_true')
parser.add_argument('-o', '--out', metavar="filename",
dest='single_output_file',
type=argparse.FileType('w'),
default=None, help='only output a single file with '
'the specified filename; use a single dash "-" to '
'specify that output should go to STDOUT (the '
'terminal)')
parser.add_argument('input_filenames', metavar='input_sequence_filename',
help='Input FAST[AQ] sequence filename.', nargs='+')
add_loadhash_args(parser)
return parser
def main(): # pylint: disable=too-many-branches,too-many-statements
info('normalize-by-median.py', ['diginorm'])
args = get_parser().parse_args()
report_on_config(args)
report_fp = args.report
force_single = args.force_single
# check for similar filenames
# if we're using a single output file only check for identical filenames
# otherwise, check for identical BASE names as well.
filenames = []
basenames = []
for pathfilename in args.input_filenames:
filenames.append(pathfilename)
if args.single_output_file:
continue # nothing more to worry about
basename = os.path.basename(pathfilename)
if basename in basenames:
print('ERROR: Duplicate filename--Cannot handle this!',
file=sys.stderr)
print('** Exiting!', file=sys.stderr)
sys.exit(1)
basenames.append(basename)
# check that files exist and there is sufficient output disk space.
check_valid_file_exists(args.input_filenames)
check_space(args.input_filenames, args.force)
if args.savetable:
check_space_for_hashtable(args, 'countgraph', args.force)
# load or create counting table.
if args.loadtable:
print('loading k-mer counting table from ' + args.loadtable,
file=sys.stderr)
htable = khmer.load_counting_hash(args.loadtable)
else:
print('making countgraph', file=sys.stderr)
htable = khmer_args.create_countgraph(args)
input_filename = None
# create an object to handle diginorm of all files
norm = Normalizer(args.cutoff, htable)
# make a list of all filenames and if they're paired or not;
# if we don't know if they're paired, default to allowing but not
# forcing pairing.
files = []
for e in filenames:
files.append([e, args.paired])
if args.unpaired_reads:
files.append([args.unpaired_reads, False])
corrupt_files = []
outfp = None
output_name = None
if args.single_output_file:
if args.single_output_file is sys.stdout:
output_name = '/dev/stdout'
else:
output_name = args.single_output_file.name
outfp = args.single_output_file
#
# main loop: iterate over all files given, do diginorm.
#
for filename, require_paired in files:
if not args.single_output_file:
output_name = os.path.basename(filename) + '.keep'
outfp = open(output_name, 'w')
# failsafe context manager in case an input file breaks
with CatchIOErrors(filename, outfp, args.single_output_file,
args.force, corrupt_files):
screed_iter = screed.open(filename, parse_description=False)
reader = broken_paired_reader(screed_iter, min_length=args.ksize,
force_single=force_single,
require_paired=require_paired)
# actually do diginorm
for record in WithDiagnostics(filename, norm, reader, report_fp):
if record is not None:
write_record(record, outfp)
print('output in ' + output_name, file=sys.stderr)
        if output_name != '/dev/stdout':
outfp.close()
# finished - print out some diagnostics.
print('Total number of unique k-mers: {0}'
.format(htable.n_unique_kmers()),
file=sys.stderr)
if args.savetable:
print('...saving to ' + args.savetable, file=sys.stderr)
htable.save(args.savetable)
fp_rate = \
khmer.calc_expected_collisions(htable, args.force, max_false_pos=.8)
# for max_false_pos see Zhang et al., http://arxiv.org/abs/1309.2975
print('fp rate estimated to be {fpr:1.3f}'.format(fpr=fp_rate),
file=sys.stderr)
if args.force and len(corrupt_files) > 0:
print("** WARNING: Finished with errors!", file=sys.stderr)
print("** IOErrors occurred in the following files:", file=sys.stderr)
print("\t", " ".join(corrupt_files), file=sys.stderr)
if __name__ == '__main__':
main()
# vim: set ft=python ts=4 sts=4 sw=4 et tw=79:
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2015 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import six
import subprocess
import sys
import time
# Need to set the environment variable before importing girder
os.environ['GIRDER_PORT'] = os.environ.get('GIRDER_PORT', '30001') # noqa
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource, RestException
from girder.constants import ROOT_DIR
from girder.utility.progress import ProgressContext
from . import base
from six.moves import range
testServer = None
def setUpModule():
global testServer
mockS3 = False
if 's3' in os.environ['ASSETSTORE_TYPE']:
mockS3 = True
plugins = os.environ.get('ENABLED_PLUGINS', '')
if plugins:
base.enabledPlugins.extend(plugins.split())
testServer = base.startServer(False, mockS3=mockS3)
def tearDownModule():
base.stopServer()
class WebClientTestEndpoints(Resource):
def __init__(self):
self.route('GET', ('progress', ), self.testProgress)
self.route('PUT', ('progress', 'stop'), self.testProgressStop)
self.route('POST', ('file', ), self.uploadFile)
self.stop = False
@access.token
def testProgress(self, params):
test = params.get('test', 'success')
duration = int(params.get('duration', 10))
startTime = time.time()
with ProgressContext(True, user=self.getCurrentUser(),
title='Progress Test', message='Progress Message',
total=duration) as ctx:
for current in range(duration):
if self.stop:
break
ctx.update(current=current)
wait = startTime + current + 1 - time.time()
if wait > 0:
time.sleep(wait)
if test == 'error':
raise RestException('Progress error test.')
testProgress.description = (
Description('Test progress contexts from the web')
.param('test', 'Name of test to run. These include "success" and '
'"failure".', required=False)
.param('duration', 'Duration of the test in seconds', required=False,
dataType='int'))
@access.token
def testProgressStop(self, params):
self.stop = True
testProgressStop.description = (
Description('Halt all progress tests'))
@access.user
def uploadFile(self, params):
"""
Providing this works around a limitation in phantom that makes us
unable to upload binary files, or at least ones that contain certain
byte values. The path parameter should be provided relative to the
root directory of the repository.
"""
self.requireParams(('folderId', 'path'), params)
path = os.path.join(ROOT_DIR, params['path'])
name = os.path.basename(path)
folder = self.model('folder').load(params['folderId'], force=True)
upload = self.model('upload').createUpload(
user=self.getCurrentUser(), name=name, parentType='folder',
parent=folder, size=os.path.getsize(path))
with open(path, 'rb') as fd:
file = self.model('upload').handleChunk(upload, fd)
return file
uploadFile.description = None
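# Runs the Jasmine spec named in SPEC_FILE under PhantomJS against the test
# server started above.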
class WebClientTestCase(base.TestCase):
def setUp(self):
self.specFile = os.environ['SPEC_FILE']
self.coverageFile = os.environ.get('COVERAGE_FILE', '')
assetstoreType = os.environ['ASSETSTORE_TYPE']
self.webSecurity = os.environ.get('WEB_SECURITY', 'true')
if self.webSecurity != 'false':
self.webSecurity = 'true'
base.TestCase.setUp(self, assetstoreType)
# One of the web client tests uses this db, so make sure it is cleared
# ahead of time. This still allows tests to be run in parallel, since
# nothing should be stored in this db
base.dropGridFSDatabase('girder_webclient_gridfs')
testServer.root.api.v1.webclienttest = WebClientTestEndpoints()
def testWebClientSpec(self):
baseUrl = '/static/built/testEnv.html'
if os.environ.get('BASEURL', ''):
baseUrl = os.environ['BASEURL']
cmd = (
os.path.join(
ROOT_DIR, 'node_modules', 'phantomjs', 'bin', 'phantomjs'),
'--web-security=%s' % self.webSecurity,
os.path.join(ROOT_DIR, 'clients', 'web', 'test', 'specRunner.js'),
'http://localhost:%s%s' % (os.environ['GIRDER_PORT'], baseUrl),
self.specFile,
self.coverageFile,
os.environ.get('JASMINE_TIMEOUT', '')
)
# phantomjs occasionally fails to load javascript files. This appears
# to be a known issue: https://github.com/ariya/phantomjs/issues/10652.
# Retry several times if it looks like this has occurred.
for tries in range(5):
retry = False
task = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
hasJasmine = False
jasmineFinished = False
for line in iter(task.stdout.readline, b''):
if isinstance(line, six.binary_type):
line = line.decode('utf8')
if ('PHANTOM_TIMEOUT' in line or
'error loading source script' in line):
task.kill()
retry = True
elif '__FETCHEMAIL__' in line:
base.mockSmtp.waitForMail()
msg = base.mockSmtp.getMail()
open('phantom_temp_%s.tmp' % os.environ['GIRDER_PORT'],
'wb').write(msg.encode('utf8'))
continue # we don't want to print this
if 'Jasmine' in line:
hasJasmine = True
if 'Testing Finished' in line:
jasmineFinished = True
sys.stdout.write(line)
sys.stdout.flush()
returncode = task.wait()
if not retry and hasJasmine and jasmineFinished:
break
if not hasJasmine:
time.sleep(1)
sys.stderr.write('Retrying test\n')
# If we are retrying, we need to reset the whole test, as the
# databases and other resources are in an unknown state
self.tearDown()
self.setUp()
self.assertEqual(returncode, 0)
|
|
#!/usr/bin/python
import subprocess
import os
import json
import pytest
import uuid
import time
import sys
import threading
import shutil
from convoy import VolumeManager
TEST_ROOT = "/tmp/convoy_test/"
CFG_ROOT = os.path.join(TEST_ROOT, "convoy")
PID_FILE = os.path.join(TEST_ROOT, "convoy.pid")
LOG_FILE = os.path.join(TEST_ROOT, "convoy.log")
TEST_SNAPSHOT_FILE = "snapshot.test"
CONTAINER_NAME = "convoy-test"
CONTAINER = "yasker/convoy"
CONVOY_CONTAINER_CMD = ["docker", "exec", CONTAINER_NAME, "convoy"]
CONVOY_BINARY = [os.path.abspath("../../bin/convoy")]
DM = "devicemapper"
DM_ROOT = os.path.join(CFG_ROOT, DM)
TEST_THREAD_COUNT = 100
TEST_LOOP_COUNT = 100
VFS_BACKUP_DIR = os.path.join(TEST_ROOT, "Backup")
VFS_DEST = "vfs://" + VFS_BACKUP_DIR
VFS = "vfs"
VFS_ROOT = os.path.join(CFG_ROOT, VFS)
VFS_VOLUME_PATH = os.path.join(TEST_ROOT, "vfs-volumes")
EBS = "ebs"
ENV_TEST_AWS_REGION = "CONVOY_TEST_AWS_REGION"
ENV_TEST_AWS_BUCKET = "CONVOY_TEST_AWS_BUCKET"
S3_PATH = "test/volume/"
DD_BLOCK_SIZE = 4096
POOL_NAME = "convoy_test_pool"
DATA_FILE = "data.vol"
METADATA_FILE = "metadata.vol"
DATA_DEVICE_SIZE = 2147483648
METADATA_DEVICE_SIZE = 52428800
DM_DIR = "/dev/mapper"
DM_BLOCK_SIZE = 2097152
EMPTY_FILE_SIZE = 104857600
DEFAULT_VOLUME_SIZE = "1073741824"
VOLUME_SIZE_IOPS = "5G"
VOLUME_IOPS = "100"
VOLUME_SIZE_BIG_Bytes = "2147483648"
VOLUME_SIZE_BIG = "2G"
VOLUME_SIZE_SMALL = "1073741824"
VOLUME_SIZE_6M = "6M"
EBS_DEFAULT_VOLUME_TYPE = "standard"
VM_IMAGE_FILE = "disk.img"
data_dev = ""
metadata_dev = ""
mount_cleanup_list = []
dm_cleanup_list = []
volume_cleanup_list = []
test_ebs = False
test_container = False
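# Helpers for creating sparse files, attaching them as loopback devices and
# mounting them; mountpoints and devices are tracked in the cleanup lists above.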
def create_empty_file(filepath, size):
subprocess.check_call(["truncate", "-s", str(size), filepath])
assert os.path.exists(filepath)
def attach_loopback_dev(filepath):
dev = subprocess.check_output(["losetup", "--show", "-f",
filepath]).strip()
assert dev.startswith("/dev/loop")
return dev
def detach_loopback_dev(dev):
subprocess.check_output(["losetup", "-d", dev])
def mount_dev(dev, mountpoint):
subprocess.check_call(["mount", dev, mountpoint])
mount_cleanup_list.append(mountpoint)
def umount_dev(mountpoint):
subprocess.check_call(["umount", mountpoint])
mount_cleanup_list.remove(mountpoint)
def setup_module():
global test_ebs
test_ebs = pytest.config.getoption("ebs")
global test_container
test_container = pytest.config.getoption("container")
if os.path.exists(TEST_ROOT):
subprocess.check_call(["rm", "-rf", TEST_ROOT])
os.makedirs(TEST_ROOT)
assert os.path.exists(TEST_ROOT)
os.makedirs(VFS_BACKUP_DIR)
assert os.path.exists(VFS_BACKUP_DIR)
data_file = os.path.join(TEST_ROOT, DATA_FILE)
create_empty_file(data_file, DATA_DEVICE_SIZE)
global data_dev
data_dev = attach_loopback_dev(data_file)
metadata_file = os.path.join(TEST_ROOT, METADATA_FILE)
create_empty_file(metadata_file, METADATA_DEVICE_SIZE)
global metadata_dev
metadata_dev = attach_loopback_dev(metadata_file)
global v
cmdline = []
if test_container:
v = VolumeManager(CONVOY_CONTAINER_CMD, TEST_ROOT)
cmdline = ["convoy-start",
"--mnt-ns", "/host/proc/1/ns/mnt"]
else:
v = VolumeManager(CONVOY_BINARY, TEST_ROOT)
cmdline = ["daemon"]
cmdline += [
"--root", CFG_ROOT,
"--log", LOG_FILE,
"--drivers=" + DM,
"--driver-opts", "dm.datadev=" + data_dev,
"--driver-opts", "dm.metadatadev=" + metadata_dev,
"--driver-opts", "dm.thinpoolname=" + POOL_NAME,
"--driver-opts", "dm.defaultvolumesize=" + DEFAULT_VOLUME_SIZE,
"--drivers=" + VFS,
"--driver-opts", "vfs.path=" + VFS_VOLUME_PATH]
if test_ebs:
cmdline += ["--drivers=ebs",
"--driver-opts",
"ebs.defaultvolumesize=" + DEFAULT_VOLUME_SIZE,
"--driver-opts",
"ebs.defaultvolumetype=" + EBS_DEFAULT_VOLUME_TYPE]
if test_container:
v.start_server_container(CONTAINER_NAME, CFG_ROOT, TEST_ROOT, CONTAINER, cmdline)
else:
v.start_server(PID_FILE, cmdline)
dm_cleanup_list.append(POOL_NAME)
wait_for_daemon()
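# Detach every loopback device whose backing file path contains the given
# keyword; used as a best-effort cleanup after the tests.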
def detach_all_lodev(keyword):
output = subprocess.check_output(["losetup", "-a"])
lines = output.splitlines()
for line in lines:
if line.find(keyword) != -1:
detach_loopback_dev(line.split(":")[0].strip())
def teardown_module():
if test_container:
code = v.stop_server_container(CONTAINER_NAME)
else:
code = v.stop_server(PID_FILE)
if code != 0:
print "Something wrong when tearing down, continuing with code ", code
while mount_cleanup_list:
code = subprocess.call(["umount", mount_cleanup_list.pop()])
if code != 0:
print "Something wrong when tearing down, continuing with code", code
while dm_cleanup_list:
code = subprocess.call(["dmsetup", "remove", "--retry",
dm_cleanup_list.pop()])
if code != 0:
print "Something wrong when tearing down, continuing with code ", code
code = subprocess.call(["dmsetup", "remove", "--retry", POOL_NAME])
if code != 0:
print "Something wrong when tearing down, continuing with code ", code
code = subprocess.call(["losetup", "-d", data_dev, metadata_dev])
if code != 0:
print "Something wrong when tearing down, continuing with code", code
detach_all_lodev(TEST_ROOT)
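# Poll the daemon until server_info() responds, then sanity-check the reported
# driver configuration against the options we started it with.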
def wait_for_daemon():
while True:
try:
data = v.server_info()
break
except subprocess.CalledProcessError:
print "Fail to communicate with daemon"
check_result = 0
if test_container:
check_result = v.check_server_container(CONTAINER_NAME)
else:
check_result = v.check_server(PID_FILE)
if check_result != 0:
print "Server failed to start"
teardown_module()
assert False
time.sleep(1)
info = json.loads(data)
success = True
try:
success = bool(success and DM in info["General"]["DriverList"])
success = bool(success and VFS in info["General"]["DriverList"])
success = bool(success and info["General"]["Root"] == CFG_ROOT)
success = bool(success and info["General"]["DefaultDriver"] == DM)
success = bool(success and info[DM]["Driver"] == "devicemapper")
success = bool(success and info[DM]["Root"] == DM_ROOT)
success = bool(success and info[DM]["DataDevice"] == data_dev)
success = bool(success and info[DM]["MetadataDevice"] == metadata_dev)
success = bool(success and info[DM]["ThinpoolDevice"] == os.path.join(DM_DIR, POOL_NAME))
success = bool(success and info[DM]["ThinpoolSize"] == str(DATA_DEVICE_SIZE))
success = bool(success and info[DM]["ThinpoolBlockSize"] == str(DM_BLOCK_SIZE))
success = bool(success and info[DM]["DefaultVolumeSize"] == DEFAULT_VOLUME_SIZE)
success = bool(success and info[VFS]["Root"] == VFS_ROOT)
success = bool(success and info[VFS]["Path"] == VFS_VOLUME_PATH)
if test_ebs:
success = bool(success and info[EBS]["DefaultVolumeSize"] == DEFAULT_VOLUME_SIZE)
success = bool(success and info[EBS]["DefaultVolumeType"] == EBS_DEFAULT_VOLUME_TYPE)
except:
success = False
if not success:
teardown_module()
assert False
@pytest.yield_fixture(autouse=True)
def cleanup_test():
yield
filenames = os.listdir(CFG_ROOT)
leftover_volumes = []
for filename in filenames:
if filename.startswith('volume'):
leftover_volumes.append(filename)
while volume_cleanup_list:
v = volume_cleanup_list.pop()
try:
delete_volume(v)
except:
print "Failed to delete volume ", v
if len(leftover_volumes) != 0:
print leftover_volumes
assert False
def create_volume(size = "", name = "", backup = "", driver = "",
volume_id = "", volume_type = "", iops = "", forvm = False):
name = v.create_volume(size, name, backup, driver,
volume_id, volume_type, iops, forvm)
if driver == "" or driver == DM:
dm_cleanup_list.append(name)
volume_cleanup_list.append(name)
return name
def delete_volume(name, ref_only = False):
v.delete_volume(name, ref_only)
try:
dm_cleanup_list.remove(name)
except ValueError:
pass
volume_cleanup_list.remove(name)
def mount_volume_with_path(name):
mount_dir = v.mount_volume_with_path(name)
mount_cleanup_list.append(mount_dir)
return mount_dir
def mount_volume(name):
mount_dir = v.mount_volume(name)
mount_cleanup_list.append(mount_dir)
return mount_dir
def umount_volume(name, mount_dir):
v.umount_volume(name)
mount_cleanup_list.remove(mount_dir)
def test_volume_crud():
volume_crud_test(DM, vmTest = False)
volume_crud_test(VFS, sizeTest = False)
def volume_crud_test(drv, sizeTest = True, vmTest = True):
name1 = create_volume(driver=drv)
name2 = create_volume(driver=drv)
if sizeTest:
name3 = create_volume(VOLUME_SIZE_BIG, driver = drv)
name4 = create_volume(VOLUME_SIZE_SMALL, driver = drv)
delete_volume(name4)
delete_volume(name3)
if vmTest:
name3 = create_volume(driver = drv, forvm = True)
name4 = create_volume(driver = drv, forvm = True)
delete_volume(name4)
delete_volume(name3)
delete_volume(name2)
delete_volume(name1)
@pytest.mark.skipif(not pytest.config.getoption("ebs"),
reason="--ebs was not specified")
def test_ebs_volume_crud():
    # Need to test the volume type and creating a volume from an existing EBS volume
name1 = create_volume(driver=EBS)
name2 = create_volume(size=VOLUME_SIZE_SMALL, driver=EBS, volume_type="gp2")
name3 = create_volume(size=VOLUME_SIZE_IOPS, driver=EBS, volume_type="io1",
iops="100")
volume3 = v.inspect_volume(name3)
ebs_volume_id3 = volume3["DriverInfo"]["EBSVolumeID"]
delete_volume(name3, ref_only = True)
name3 = create_volume(driver=EBS, volume_id=ebs_volume_id3)
delete_volume(name3)
delete_volume(name2)
delete_volume(name1)
def test_vfs_delete_volume_ref_only():
name = create_volume(driver=VFS)
insp = v.inspect_volume(name)
path = insp["DriverInfo"]["Path"]
assert os.path.exists(path)
filename = "testfile"
test_file = os.path.join(path,filename)
with open(test_file, "w") as f:
subprocess.check_call(["echo", "This is volume test file"], stdout=f)
assert os.path.exists(test_file)
delete_volume(name, ref_only = True)
assert os.path.exists(test_file)
os.remove(test_file)
def test_volume_name():
volume_name_test(DM)
volume_name_test(VFS)
def volume_name_test(drv):
vol_name1 = "vol.1_1-0"
vol_name2 = "vol.2_2-0"
vol = create_volume(name=vol_name1, driver=drv)
vols = v.list_volumes()
assert vols[vol]["Name"] == vol_name1
assert vols[vol]["Driver"] == drv
assert vols[vol]["CreatedTime"] != ""
with pytest.raises(subprocess.CalledProcessError):
new_name = create_volume(name=vol_name1, driver=drv)
with pytest.raises(subprocess.CalledProcessError):
new_name = create_volume(driver="randomdriver")
delete_volume(vol_name1)
vols = v.list_volumes()
assert vol not in vols
vol1 = create_volume(name=vol_name1, driver=drv)
vol2 = create_volume(name=vol_name2, driver=drv)
vols = v.list_volumes()
assert vols[vol1]["Name"] == vol_name1
assert vols[vol2]["Name"] == vol_name2
assert vols[vol1]["CreatedTime"] != ""
assert vols[vol2]["CreatedTime"] != ""
delete_volume(vol1)
delete_volume(vol_name2)
def mount_volume_and_create_file(name, filename):
# with format
volume_mount_dir = mount_volume(name)
test_file = os.path.join(volume_mount_dir,filename)
with open(test_file, "w") as f:
subprocess.check_call(["echo", "This is volume test file"], stdout=f)
assert os.path.exists(test_file)
umount_volume(name, volume_mount_dir)
    # Doesn't work with the current VFS implementation, since it won't really mount
#assert not os.path.exists(test_file)
def test_volume_mount():
volume_mount_test(DM)
if test_ebs:
volume_mount_test(EBS)
    # Skip the VFS mount test because we only pass the original volume path as
    # the mount path; no real mount work is done yet.
def volume_mount_test(drv):
vol = create_volume(driver=drv)
# with format
filename = "test"
mount_volume_and_create_file(vol, filename)
# without format
volume_mount_dir = mount_volume_with_path(vol)
test_file = os.path.join(volume_mount_dir, filename)
assert os.path.exists(test_file)
umount_volume(vol, volume_mount_dir)
assert not os.path.exists(test_file)
# auto mount
volume_mount_dir = mount_volume(vol)
test_file = os.path.join(volume_mount_dir, filename)
assert os.path.exists(test_file)
umount_volume(vol, volume_mount_dir)
assert not os.path.exists(test_file)
delete_volume(vol)
def test_volume_vm_mount():
volume_vm_test(VFS)
def volume_vm_test(drv):
vol = create_volume(driver = drv, size = VOLUME_SIZE_SMALL, forvm = True)
mount_dir = mount_volume(vol)
image_filepath = os.path.join(mount_dir, VM_IMAGE_FILE)
assert os.path.exists(image_filepath)
size = os.stat(image_filepath).st_size
assert str(size) == VOLUME_SIZE_SMALL
umount_volume(vol, mount_dir)
delete_volume(vol)
def test_volume_list():
volume_list_driver_test(DM)
volume_list_driver_test(VFS, False)
if test_ebs:
volume_list_driver_test(EBS)
def volume_list_driver_test(drv, check_size = True):
volumes = v.list_volumes()
assert len(volumes) == 0
vol1 = create_volume(driver=drv)
vol2 = create_volume(driver=drv)
if check_size:
vol3 = create_volume(VOLUME_SIZE_BIG, driver=drv)
vol4 = create_volume(VOLUME_SIZE_SMALL, driver=drv)
volume = v.inspect_volume(vol1)
assert volume["Name"] == vol1
if check_size:
assert volume["DriverInfo"]["Size"] == DEFAULT_VOLUME_SIZE
volume = v.inspect_volume(vol2)
assert volume["Name"] == vol2
if check_size:
assert volume["DriverInfo"]["Size"] == DEFAULT_VOLUME_SIZE
if check_size:
volumes = v.list_volumes()
assert volumes[vol1]["DriverInfo"]["Size"] == DEFAULT_VOLUME_SIZE
assert volumes[vol2]["DriverInfo"]["Size"] == DEFAULT_VOLUME_SIZE
assert volumes[vol3]["DriverInfo"]["Size"] == VOLUME_SIZE_BIG_Bytes
assert volumes[vol4]["DriverInfo"]["Size"] == VOLUME_SIZE_SMALL
delete_volume(vol4)
delete_volume(vol3)
delete_volume(vol2)
delete_volume(vol1)
def test_snapshot_crud():
snapshot_crud_test(DM)
snapshot_crud_test(VFS)
def snapshot_crud_test(driver):
volume_name = create_volume(VOLUME_SIZE_SMALL, name="vol1", driver=driver)
snapshot = v.create_snapshot(volume_name)
v.delete_snapshot(snapshot)
delete_volume(volume_name)
# delete snapshot automatically with volume
volume_name = create_volume(VOLUME_SIZE_SMALL, name="vol1", driver=driver)
snap1 = v.create_snapshot(volume_name)
snap2 = v.create_snapshot(volume_name)
snap3 = v.create_snapshot(volume_name)
v.delete_snapshot(snap1)
v.delete_snapshot(snap2)
delete_volume(volume_name)
def test_snapshot_name():
snapshot_name_test(DM)
snapshot_name_test(VFS)
def snapshot_name_test(driver):
volume_name = create_volume(VOLUME_SIZE_SMALL, driver=driver)
snap1_name = "snap1"
snap1 = v.create_snapshot(volume_name, name=snap1_name)
assert snap1_name == snap1
vols = v.list_volumes()
s = vols[volume_name]["Snapshots"][snap1]
assert s["Name"] == snap1_name
assert s["DriverInfo"]["Driver"] == driver
assert s["CreatedTime"] != ""
with pytest.raises(subprocess.CalledProcessError):
new_name = v.create_snapshot(volume_name, name=snap1_name)
v.delete_snapshot(snap1)
delete_volume(volume_name)
def test_snapshot_list():
snapshot_list_test(DM)
snapshot_list_test(VFS, False)
def snapshot_list_test(driver, check_size = True):
volume1_name = create_volume(VOLUME_SIZE_SMALL, name = "volume1", driver=driver)
volume2_name = create_volume(VOLUME_SIZE_BIG, driver=driver)
with pytest.raises(subprocess.CalledProcessError):
snapshot = v.inspect_snapshot(str(uuid.uuid1()))
with pytest.raises(subprocess.CalledProcessError):
volume = v.inspect_snapshot(str(uuid.uuid1()))
snap0_vol1 = v.create_snapshot(volume1_name, "snap0_vol1")
assert snap0_vol1 == "snap0_vol1"
snapshot = v.inspect_snapshot("snap0_vol1")
assert snapshot["VolumeName"] == volume1_name
if check_size:
assert str(snapshot["DriverInfo"]["Size"]) == VOLUME_SIZE_SMALL
assert snapshot["Name"] == "snap0_vol1"
snap1_vol1 = v.create_snapshot(volume1_name)
snap2_vol1 = v.create_snapshot(volume1_name)
snap1_vol2 = v.create_snapshot(volume2_name, "snap1_vol2")
assert snap1_vol2 == "snap1_vol2"
snap2_vol2 = v.create_snapshot(volume2_name, "snap2_vol2")
assert snap2_vol2 == "snap2_vol2"
snap3_vol2 = v.create_snapshot(volume2_name, "snap3_vol2")
assert snap3_vol2 == "snap3_vol2"
volume = v.inspect_volume(volume2_name)
assert snap1_vol2 in volume["Snapshots"]
assert volume["Snapshots"][snap1_vol2]["Name"] == "snap1_vol2"
assert volume["Snapshots"][snap1_vol2]["CreatedTime"] != ""
assert snap2_vol2 in volume["Snapshots"]
assert volume["Snapshots"][snap2_vol2]["Name"] == "snap2_vol2"
assert volume["Snapshots"][snap2_vol2]["CreatedTime"] != ""
assert snap3_vol2 in volume["Snapshots"]
assert volume["Snapshots"][snap3_vol2]["Name"] == "snap3_vol2"
assert volume["Snapshots"][snap3_vol2]["CreatedTime"] != ""
volumes = v.list_volumes()
assert snap0_vol1 in volumes[volume1_name]["Snapshots"]
assert snap1_vol1 in volumes[volume1_name]["Snapshots"]
assert snap2_vol1 in volumes[volume1_name]["Snapshots"]
assert snap1_vol2 in volumes[volume2_name]["Snapshots"]
assert snap2_vol2 in volumes[volume2_name]["Snapshots"]
assert snap3_vol2 in volumes[volume2_name]["Snapshots"]
v.delete_snapshot(snap0_vol1)
with pytest.raises(subprocess.CalledProcessError):
snapshot = v.inspect_snapshot(snap0_vol1)
v.delete_snapshot(snap1_vol1)
v.delete_snapshot(snap2_vol1)
v.delete_snapshot(snap1_vol2)
v.delete_snapshot(snap2_vol2)
v.delete_snapshot(snap3_vol2)
delete_volume(volume2_name)
delete_volume(volume1_name)
@pytest.mark.skipif(not pytest.config.getoption("ebs"),
reason="--ebs was not specified")
def test_ebs_snapshot_backup():
volume_name = create_volume(size = VOLUME_SIZE_SMALL, name = "ebs_volume", driver=EBS)
assert volume_name == "ebs_volume"
mount_volume_and_create_file(volume_name, "test-vol1-v1")
snap1_name = v.create_snapshot("ebs_volume", "snap1")
assert snap1_name == "snap1"
volume = v.inspect_volume("ebs_volume")
snap1 = v.inspect_snapshot("snap1")
assert snap1["VolumeName"] == volume_name
assert snap1["Name"] == "snap1"
assert str(snap1["DriverInfo"]["Size"]) == VOLUME_SIZE_SMALL
assert snap1["DriverInfo"]["EBSVolumeID"] == volume["DriverInfo"]["EBSVolumeID"]
assert snap1["DriverInfo"]["Size"] == volume["DriverInfo"]["Size"]
backup_url = v.create_backup(snap1_name)
backup = v.inspect_backup(backup_url)
assert backup["EBSVolumeID"] == volume["DriverInfo"]["EBSVolumeID"]
assert backup["EBSSnapshotID"] == snap1["DriverInfo"]["EBSSnapshotID"]
assert backup["Size"] == snap1["DriverInfo"]["Size"]
v.delete_backup(backup_url)
v.delete_snapshot("snap1")
delete_volume(volume_name)
def create_delete_volume():
vol = v.create_volume(size = VOLUME_SIZE_6M)
snap = v.create_snapshot(vol)
v.delete_snapshot(snap)
v.delete_volume(vol)
# uses default driver which is device mapper
def test_create_volume_in_parallel():
threads = []
for i in range(TEST_THREAD_COUNT):
threads.append(threading.Thread(target = create_delete_volume))
threads[i].start()
for i in range(TEST_THREAD_COUNT):
threads[i].join()
def test_create_volume_in_sequence():
for i in range(TEST_LOOP_COUNT):
create_delete_volume()
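# Archive a mounted volume's contents into a zip so that VFS volumes can be
# checksummed the same way as block-device backed ones.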
def compress_volume(volume_name):
mountpoint = mount_volume(volume_name)
zipfile = os.path.join(TEST_ROOT, volume_name)
shutil.make_archive(zipfile, "zip", mountpoint)
umount_volume(volume_name, mountpoint)
return zipfile + ".zip"
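# Checksum a volume: VFS volumes are hashed via a zip of their directory tree,
# DM/EBS volumes are hashed directly from their block device.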
def get_volume_checksum(volume_name, driver):
f = ""
if driver == VFS:
f = compress_volume(volume_name)
else: # DM/EBS
f = v.inspect_volume(volume_name)["DriverInfo"]["Device"]
output = subprocess.check_output(["sha512sum", f]).decode()
if driver == "VFS" and f != "":
os.remove(f)
return output.split(" ")[0]
def check_restore(origin_vol, restored_vol, driver):
volume_checksum = get_volume_checksum(origin_vol, driver)
restore_checksum = get_volume_checksum(restored_vol, driver)
assert volume_checksum == restore_checksum
def test_backup_create_restore_only():
process_restore_with_original_removed(VFS, VFS_DEST)
process_restore_with_original_removed(DM, VFS_DEST)
if test_ebs:
process_restore_with_original_removed(EBS)
def process_restore_with_original_removed(driver, dest = ""):
volume1_name = create_volume(size = VOLUME_SIZE_BIG, driver = driver)
mount_volume_and_create_file(volume1_name, "test-vol1-v1")
snap1_vol1_name = v.create_snapshot(volume1_name)
bak = v.create_backup(snap1_vol1_name, dest)
volume1_checksum = get_volume_checksum(volume1_name, driver)
delete_volume(volume1_name)
if driver == DM:
        # Cannot specify a different size when restoring from a backup
with pytest.raises(subprocess.CalledProcessError):
res_volume1_name = create_volume(VOLUME_SIZE_SMALL, "res-vol1", bak,
driver = driver)
res_volume1_name = create_volume(name = "res-vol1", backup = bak, driver =
driver)
res_volume1_checksum = get_volume_checksum(res_volume1_name, driver)
assert res_volume1_checksum == volume1_checksum
delete_volume(res_volume1_name)
v.delete_backup(bak)
def test_duplicate_backup():
process_duplicate_backup_test(VFS_DEST, VFS)
process_duplicate_backup_test(VFS_DEST, DM)
def process_duplicate_backup_test(dest, driver):
volume_name = create_volume(size = VOLUME_SIZE_BIG, driver = driver)
mount_volume_and_create_file(volume_name, "volume_snap_test")
snap_name = v.create_snapshot(volume_name)
volume_checksum = get_volume_checksum(volume_name, driver)
bak1 = v.create_backup(snap_name, dest)
bak2 = v.create_backup(snap_name, dest)
res2 = create_volume(backup = bak2, driver = driver)
res2_checksum = get_volume_checksum(res2, driver = driver)
assert res2_checksum == volume_checksum
v.delete_backup(bak2)
res1 = create_volume(backup = bak1, driver = driver)
res1_checksum = get_volume_checksum(res1, driver = driver)
assert res1_checksum == volume_checksum
v.delete_backup(bak1)
delete_volume(res2)
delete_volume(res1)
delete_volume(volume_name)
def test_vfs_objectstore():
vfs_objectstore_test(VFS)
vfs_objectstore_test(DM)
def vfs_objectstore_test(driver):
process_objectstore_test(VFS_DEST, driver)
@pytest.mark.skipif(not pytest.config.getoption("s3"),
reason="--s3 was not specified")
def test_s3_objectstore():
s3_objectstore_test(VFS)
s3_objectstore_test(DM)
def s3_objectstore_test(driver):
process_objectstore_test(get_s3_dest(), driver)
process_objectstore_test(get_s3_dest(S3_PATH), driver)
def get_s3_dest(path = ""):
region = os.environ[ENV_TEST_AWS_REGION]
bucket = os.environ[ENV_TEST_AWS_BUCKET]
return "s3://" + bucket + "@" + region + "/" + path
def unescape_url(url):
return url.replace("\\u0026", "&").replace("u0026","&")
def process_objectstore_test(dest, driver):
#make sure objectstore is empty
backups = v.list_backup(dest)
assert len(backups) == 0
#add volume to objectstore
name1 = "volume1_" + str(uuid.uuid4())[:8]
name2 = str(uuid.uuid4())[:2]
volume1_name = create_volume(VOLUME_SIZE_BIG, name1, driver=driver)
volume1 = v.inspect_volume(name1)
volume2_name = create_volume(VOLUME_SIZE_SMALL, name2, driver=driver)
with pytest.raises(subprocess.CalledProcessError):
backups = v.list_backup(dest, volume1_name)
#first snapshots
snap1_vol1_name = v.create_snapshot(volume1_name, "snap1_vol1")
snap1_vol1 = v.inspect_snapshot("snap1_vol1")
snap1_vol1_bak = v.create_backup("snap1_vol1", dest)
backups = v.list_backup(dest, volume1_name)
assert len(backups) == 1
backup = backups[unescape_url(snap1_vol1_bak)]
assert backup["DriverName"] == driver
assert backup["VolumeName"] == volume1["Name"]
if "Size" in volume1["DriverInfo"]:
assert backup["VolumeSize"] == volume1["DriverInfo"]["Size"]
assert backup["VolumeCreatedAt"] == volume1["CreatedTime"]
assert backup["SnapshotName"] == snap1_vol1["Name"]
assert backup["SnapshotCreatedAt"] == snap1_vol1["CreatedTime"]
assert backup["CreatedTime"] != ""
backup = v.inspect_backup(snap1_vol1_bak)
assert backup["DriverName"] == driver
assert backup["VolumeName"] == volume1["Name"]
if "Size" in volume1["DriverInfo"]:
assert backup["VolumeSize"] == volume1["DriverInfo"]["Size"]
assert backup["VolumeCreatedAt"] == volume1["CreatedTime"]
assert backup["SnapshotName"] == snap1_vol1["Name"]
assert backup["SnapshotCreatedAt"] == snap1_vol1["CreatedTime"]
assert backup["CreatedTime"] != ""
snap1_vol2_name = v.create_snapshot(volume2_name, "snap1_vol2")
snap1_vol2_bak = v.create_backup("snap1_vol2", dest)
#list snapshots
backups = v.list_backup(dest, volume2_name)
assert len(backups) == 1
backup = v.inspect_backup(snap1_vol2_bak)
assert backup["VolumeName"] == volume2_name
assert backup["SnapshotName"] == snap1_vol2_name
#second snapshots
mount_volume_and_create_file(volume1_name, "test-vol1-v1")
snap2_vol1_name = v.create_snapshot(volume1_name)
snap2_vol1_bak = v.create_backup(snap2_vol1_name, dest)
mount_volume_and_create_file(volume2_name, "test-vol2-v2")
snap2_vol2_name = v.create_snapshot(volume2_name)
snap2_vol2_bak = v.create_backup(snap2_vol2_name, dest)
#list snapshots again
backups = v.list_backup(dest)
assert len(backups) == 4
assert backups[unescape_url(snap1_vol1_bak)]["DriverName"] == driver
assert backups[unescape_url(snap1_vol1_bak)]["VolumeName"] == volume1_name
assert backups[unescape_url(snap1_vol1_bak)]["SnapshotName"] == snap1_vol1_name
assert backups[unescape_url(snap2_vol1_bak)]["DriverName"] == driver
assert backups[unescape_url(snap2_vol1_bak)]["VolumeName"] == volume1_name
assert backups[unescape_url(snap2_vol1_bak)]["SnapshotName"] == snap2_vol1_name
assert backups[unescape_url(snap1_vol2_bak)]["DriverName"] == driver
assert backups[unescape_url(snap1_vol2_bak)]["VolumeName"] == volume2_name
assert backups[unescape_url(snap1_vol2_bak)]["SnapshotName"] == snap1_vol2_name
assert backups[unescape_url(snap2_vol2_bak)]["DriverName"] == driver
assert backups[unescape_url(snap2_vol2_bak)]["VolumeName"] == volume2_name
assert backups[unescape_url(snap2_vol2_bak)]["SnapshotName"] == snap2_vol2_name
backups = v.list_backup(dest, volume1_name)
assert len(backups) == 2
assert backups[unescape_url(snap1_vol1_bak)]["VolumeName"] == volume1_name
assert backups[unescape_url(snap1_vol1_bak)]["SnapshotName"] == snap1_vol1_name
assert backups[unescape_url(snap2_vol1_bak)]["VolumeName"] == volume1_name
assert backups[unescape_url(snap2_vol1_bak)]["SnapshotName"] == snap2_vol1_name
backups = v.list_backup(dest, volume2_name)
assert len(backups) == 2
assert backups[unescape_url(snap1_vol2_bak)]["VolumeName"] == volume2_name
assert backups[unescape_url(snap1_vol2_bak)]["SnapshotName"] == snap1_vol2_name
assert backups[unescape_url(snap2_vol2_bak)]["VolumeName"] == volume2_name
assert backups[unescape_url(snap2_vol2_bak)]["SnapshotName"] == snap2_vol2_name
#restore snapshot
res_volume1_name = create_volume(name = "res-vol1", backup = snap2_vol1_bak,
driver=driver)
check_restore(volume1_name, res_volume1_name, driver)
res_volume2_name = create_volume(backup = snap2_vol2_bak, driver=driver)
check_restore(volume2_name, res_volume2_name, driver)
#remove snapshots from objectstore
v.delete_backup(snap2_vol1_bak)
v.delete_backup(snap2_vol2_bak)
#list snapshots again
backups = v.list_backup(dest)
assert len(backups) == 2
assert backups[unescape_url(snap1_vol1_bak)]["DriverName"] == driver
assert backups[unescape_url(snap1_vol1_bak)]["VolumeName"] == volume1_name
assert backups[unescape_url(snap1_vol1_bak)]["SnapshotName"] == snap1_vol1_name
assert backups[unescape_url(snap1_vol2_bak)]["DriverName"] == driver
assert backups[unescape_url(snap1_vol2_bak)]["VolumeName"] == volume2_name
assert backups[unescape_url(snap1_vol2_bak)]["SnapshotName"] == snap1_vol2_name
backups = v.list_backup(dest, volume1_name)
assert len(backups) == 1
backup = backups[unescape_url(snap1_vol1_bak)]
assert backup["DriverName"] == driver
assert backup["VolumeName"] == volume1_name
assert backup["SnapshotName"] == snap1_vol1_name
backup = v.inspect_backup(snap1_vol1_bak)
assert backup["DriverName"] == driver
assert backup["VolumeName"] == volume1_name
assert backup["SnapshotName"] == snap1_vol1_name
backups = v.list_backup(dest, volume2_name)
assert len(backups) == 1
backup = backups[unescape_url(snap1_vol2_bak)]
assert backup["DriverName"] == driver
assert backup["VolumeName"] == volume2_name
assert backup["SnapshotName"] == snap1_vol2_name
backup = v.inspect_backup(snap1_vol2_bak)
assert backup["DriverName"] == driver
assert backup["VolumeName"] == volume2_name
assert backup["SnapshotName"] == snap1_vol2_name
#remove snapshots from objectstore
v.delete_backup(snap1_vol2_bak)
v.delete_backup(snap1_vol1_bak)
v.delete_snapshot(snap1_vol1_name)
v.delete_snapshot(snap2_vol1_name)
v.delete_snapshot(snap1_vol2_name)
v.delete_snapshot(snap2_vol2_name)
delete_volume(volume1_name)
delete_volume(volume2_name)
delete_volume(res_volume1_name)
delete_volume(res_volume2_name)
def test_cross_restore_error_checking():
vfs_vol_name = create_volume(driver=VFS)
vfs_snap_name = v.create_snapshot(vfs_vol_name)
vfs_backup = v.create_backup(vfs_snap_name, VFS_DEST)
dm_vol_name = create_volume(size = VOLUME_SIZE_SMALL, driver=DM)
dm_snap_name = v.create_snapshot(dm_vol_name)
dm_backup = v.create_backup(dm_snap_name, VFS_DEST)
with pytest.raises(subprocess.CalledProcessError):
create_volume(driver=VFS, backup=dm_backup)
with pytest.raises(subprocess.CalledProcessError):
create_volume(driver=DM, backup=vfs_backup)
vfs_res = create_volume(driver=VFS, backup=vfs_backup)
dm_res = create_volume(driver=DM, backup=dm_backup)
delete_volume(vfs_vol_name)
delete_volume(vfs_res)
delete_volume(dm_vol_name)
delete_volume(dm_res)
|
|
from core import C
from basic import Basic
from singleton import S
from operations import AssocOp
from cache import cacheit
from expr import Expr
class Add(AssocOp):
__slots__ = []
is_Add = True
#identity = S.Zero
# cyclic import, so defined in numbers.py
@classmethod
def flatten(cls, seq):
"""
        Takes the sequence "seq" of nested Adds and returns a flattened list.
        Returns: (commutative_part, noncommutative_part, order_symbols)
        Applies associativity; all terms are treated as commutative with
        respect to addition.
"""
terms = {} # term -> coeff
# e.g. x**2 -> 5 for ... + 5*x**2 + ...
coeff = S.Zero # standalone term
# e.g. 3 + ...
order_factors = []
for o in seq:
# O(x)
if o.is_Order:
for o1 in order_factors:
if o1.contains(o):
o = None
break
if o is None:
continue
order_factors = [o]+[o1 for o1 in order_factors if not o.contains(o1)]
continue
# 3 or NaN
elif o.is_Number:
if o is S.NaN or coeff is S.ComplexInfinity and o.is_bounded is False:
# we know for sure the result will be nan
return [S.NaN], [], None
if coeff.is_Number:
coeff += o
if coeff is S.NaN:
# we know for sure the result will be nan
return [S.NaN], [], None
continue
elif o is S.ComplexInfinity:
if coeff.is_bounded is False:
# we know for sure the result will be nan
return [S.NaN], [], None
coeff = S.ComplexInfinity
continue
# Add([...])
elif o.is_Add:
# NB: here we assume Add is always commutative
seq.extend(o.args) # TODO zerocopy?
continue
# Mul([...])
elif o.is_Mul:
c = o.args[0]
# 3*...
if c.is_Number:
if c is S.One:
s = o
else:
s = o.as_two_terms()[1]
else:
c = S.One
s = o
# everything else
else:
c = S.One
s = o
# now we have:
# o = c*s, where
#
# c is a Number
# s is an expression with number factor extracted
# let's collect terms with the same s, so e.g.
# 2*x**2 + 3*x**2 -> 5*x**2
if s in terms:
terms[s] += c
else:
terms[s] = c
# now let's construct new args:
# [2*x**2, x**3, 7*x**4, pi, ...]
newseq = []
noncommutative = False
for s,c in terms.items():
# 0*s
if c is S.Zero:
continue
# 1*s
elif c is S.One:
newseq.append(s)
# c*s
else:
if s.is_Mul:
                    # Mul already keeps its arguments in canonical order,
                    # so we can simply put c in slot 0 and take the fast path.
cs = s._new_rawargs(*((c,) + s.args))
newseq.append(cs)
else:
# alternatively we have to call all Mul's machinery (slow)
newseq.append(Mul(c,s))
noncommutative = noncommutative or not s.is_commutative
# oo, -oo
if coeff is S.Infinity:
newseq = [f for f in newseq if not (f.is_nonnegative or f.is_real and
(f.is_bounded or
f.is_finite or
f.is_infinitesimal))]
elif coeff is S.NegativeInfinity:
newseq = [f for f in newseq if not (f.is_nonpositive or f.is_real and
(f.is_bounded or
f.is_finite or
f.is_infinitesimal))]
if coeff is S.ComplexInfinity:
# zoo might be
# unbounded_real + bounded_im
# bounded_real + unbounded_im
# unbounded_real + unbounded_im
# addition of a bounded real or imaginary number won't be able to
# change the zoo nature; if unbounded a NaN condition could result if
# the unbounded symbol had sign opposite of the unbounded portion of zoo,
# e.g. unbounded_real - unbounded_real
newseq = [c for c in newseq if not (c.is_bounded and
c.is_real is not None)]
# process O(x)
if order_factors:
newseq2 = []
for t in newseq:
for o in order_factors:
# x + O(x) -> O(x)
if o.contains(t):
t = None
break
# x + O(x**2) -> x + O(x**2)
if t is not None:
newseq2.append(t)
newseq = newseq2 + order_factors
# 1 + O(1) -> O(1)
for o in order_factors:
if o.contains(coeff):
coeff = S.Zero
break
# order args canonically
# Currently we sort things using hashes, as it is quite fast. A better
# solution is not to sort things at all - but this needs some more
# fixing.
newseq.sort(key=hash)
# current code expects coeff to be always in slot-0
if coeff is not S.Zero:
newseq.insert(0, coeff)
# we are done
if noncommutative:
return [], newseq, None
else:
return newseq, [], None
@cacheit
def as_coeff_add(self, *deps):
if deps:
l1 = []
l2 = []
for f in self.args:
if f.has(*deps):
l2.append(f)
else:
l1.append(f)
return self._new_rawargs(*l1), tuple(l2)
coeff, notrat = self.args[0].as_coeff_add()
        if coeff is not S.Zero:
return coeff, notrat + self.args[1:]
return S.Zero, self.args
@cacheit
def as_coeff_mul(self, *deps):
# -2 + 2 * a -> -1, 2-2*a
if self.args[0].is_Rational and self.args[0].is_negative:
return S.NegativeOne, (-self,)
return Expr.as_coeff_mul(self, *deps)
def _eval_derivative(self, s):
return Add(*[f.diff(s) for f in self.args])
def _eval_nseries(self, x, n):
terms = [t.nseries(x, n=n) for t in self.args]
return Add(*terms)
def _matches_simple(self, expr, repl_dict):
# handle (w+3).matches('x+5') -> {w: x+2}
coeff, terms = self.as_coeff_add()
if len(terms)==1:
return terms[0].matches(expr - coeff, repl_dict)
return
matches = AssocOp._matches_commutative
@staticmethod
def _combine_inverse(lhs, rhs):
"""
Returns lhs - rhs, but treats arguments like symbols, so things like
oo - oo return 0, instead of a nan.
"""
from sympy import oo, I, expand_mul
if lhs == oo and rhs == oo or lhs == oo*I and rhs == oo*I:
return S.Zero
return expand_mul(lhs - rhs)
@cacheit
def as_two_terms(self):
"""Return head and tail of self.
This is the most efficient way to get the head and tail of an
expression.
- if you want only the head, use self.args[0];
- if you want to process the arguments of the tail then use
          self.as_coeff_add() which gives the head and a tuple containing
the arguments of the tail when treated as an Add.
- if you want the coefficient when self is treated as a Mul
then use self.as_coeff_mul()[0]
>>> from sympy.abc import x, y
>>> (3*x*y).as_two_terms()
(3, x*y)
"""
if len(self.args) == 1:
return S.Zero, self
return self.args[0], self._new_rawargs(*self.args[1:])
def as_numer_denom(self):
numers, denoms = [],[]
for n,d in [f.as_numer_denom() for f in self.args]:
numers.append(n)
denoms.append(d)
r = xrange(len(numers))
return Add(*[Mul(*(denoms[:i]+[numers[i]]+denoms[i+1:]))
for i in r]), Mul(*denoms)
def _eval_is_polynomial(self, syms):
for term in self.args:
if not term._eval_is_polynomial(syms):
return False
return True
# assumption methods
_eval_is_real = lambda self: self._eval_template_is_attr('is_real')
_eval_is_bounded = lambda self: self._eval_template_is_attr('is_bounded')
_eval_is_commutative = lambda self: self._eval_template_is_attr('is_commutative')
_eval_is_integer = lambda self: self._eval_template_is_attr('is_integer')
_eval_is_comparable = lambda self: self._eval_template_is_attr('is_comparable')
def _eval_is_odd(self):
l = [f for f in self.args if not (f.is_even==True)]
if not l:
return False
if l[0].is_odd:
return self._new_rawargs(*l[1:]).is_even
def _eval_is_irrational(self):
for t in self.args:
a = t.is_irrational
if a: return True
if a is None: return
return False
def _eval_is_positive(self):
c, r = self.as_two_terms()
if c.is_positive and r.is_positive:
return True
if c.is_unbounded:
if r.is_unbounded:
# either c or r is negative
return
else:
return c.is_positive
elif r.is_unbounded:
return r.is_positive
if c.is_nonnegative and r.is_positive:
return True
if r.is_nonnegative and c.is_positive:
return True
if c.is_nonpositive and r.is_nonpositive:
return False
def _eval_is_negative(self):
c, r = self.as_two_terms()
if c.is_negative and r.is_negative:
return True
if c.is_unbounded:
if r.is_unbounded:
# either c or r is positive
return
else:
return c.is_negative
elif r.is_unbounded:
return r.is_negative
if c.is_nonpositive and r.is_negative:
return True
if r.is_nonpositive and c.is_negative:
return True
if c.is_nonnegative and r.is_nonnegative:
return False
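    # Substitution handles partial matches of Add patterns, e.g.
    # (a + b + c).subs(b + c, x) -> a + x, as outlined in the comments below.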
def _eval_subs(self, old, new):
if self == old:
return new
if isinstance(old, FunctionClass):
return self.__class__(*[s._eval_subs(old, new) for s in self.args ])
coeff_self, terms_self = self.as_coeff_add()
coeff_old, terms_old = old.as_coeff_add()
if terms_self == terms_old: # (2+a).subs(3+a,y) -> 2-3+y
return Add(new, coeff_self, -coeff_old)
if old.is_Add:
if len(terms_old) < len(terms_self): # (a+b+c+d).subs(b+c,x) -> a+x+d
self_set = set(terms_self)
old_set = set(terms_old)
if old_set < self_set:
ret_set = self_set - old_set
return Add(new, coeff_self, -coeff_old, *[s._eval_subs(old, new) for s in ret_set])
return self.__class__(*[s._eval_subs(old, new) for s in self.args])
@cacheit
def extract_leading_order(self, *symbols):
"""
        Returns the leading term and its order.
Examples:
>>> from sympy.abc import x
>>> (x+1+1/x**5).extract_leading_order(x)
((x**(-5), O(x**(-5))),)
>>> (1+x).extract_leading_order(x)
((1, O(1)),)
>>> (x+x**2).extract_leading_order(x)
((x, O(x)),)
"""
lst = []
seq = [(f, C.Order(f, *symbols)) for f in self.args]
for ef,of in seq:
for e,o in lst:
if o.contains(of) and o != of:
of = None
break
if of is None:
continue
new_lst = [(ef,of)]
for e,o in lst:
if of.contains(o) and o != of:
continue
new_lst.append((e,o))
lst = new_lst
return tuple(lst)
def as_real_imag(self, deep=True):
sargs, terms = self.args, []
re_part, im_part = [], []
for term in sargs:
re, im = term.as_real_imag(deep=deep)
re_part.append(re)
im_part.append(im)
return (self.new(*re_part), self.new(*im_part))
def _eval_as_leading_term(self, x):
coeff, terms = self.as_coeff_add(x)
has_unbounded = bool([f for f in self.args if f.is_unbounded])
if has_unbounded:
if isinstance(terms, Basic):
terms = terms.args
terms = [f for f in terms if not f.is_bounded]
if coeff is not S.Zero:
o = C.Order(x)
else:
o = C.Order(terms[0]*x,x)
n = 1
s = self.nseries(x, n=n)
while s.is_Order:
n +=1
s = self.nseries(x, n=n)
if s.is_Add:
s = s.removeO()
if s.is_Add:
lst = s.extract_leading_order(x)
return Add(*[e for (e,f) in lst])
return s.as_leading_term(x)
def _eval_power(self, other, terms=False):
        # (-3 + y)**n  ->  (-1)**n * (3 - y)**n
#
# If terms=True then return the arguments that should be
# multiplied together rather than multiplying them.
#
        # At present, as_coeff_mul returns +/-1 but the
        # following should work even if that changes.
if Basic.keep_sign:
return None
rv = None
c, t = self.as_coeff_mul()
if c.is_negative and not other.is_integer:
if c is not S.NegativeOne and self.is_positive:
coeff = C.Pow(-c, other)
assert len(t) == 1, 't'
b = -t[0]
rv = (coeff, C.Pow(b, other))
elif c is not S.One:
coeff = C.Pow(c, other)
assert len(t) == 1, 't'
b = t[0]
rv = (coeff, C.Pow(b, other))
if not rv or terms:
return rv
else:
return C.Mul(*rv)
def _eval_conjugate(self):
return Add(*[t.conjugate() for t in self.args])
def _eval_expand_basic(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_basic'):
newterm = term._eval_expand_basic(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_exp(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_exp'):
newterm = term._eval_expand_power_exp(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_power_base(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_power_base'):
newterm = term._eval_expand_power_base(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_mul(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_mul'):
newterm = term._eval_expand_mul(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_multinomial(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_multinomial'):
newterm = term._eval_expand_multinomial(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_log(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_log'):
newterm = term._eval_expand_log(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_complex(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_complex'):
newterm = term._eval_expand_complex(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_trig(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_trig'):
newterm = term._eval_expand_trig(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def _eval_expand_func(self, deep=True, **hints):
sargs, terms = self.args, []
for term in sargs:
if hasattr(term, '_eval_expand_func'):
newterm = term._eval_expand_func(deep=deep, **hints)
else:
newterm = term
terms.append(newterm)
return self.new(*terms)
def __neg__(self):
return Add(*[-t for t in self.args])
def _sage_(self):
s = 0
for x in self.args:
s += x._sage_()
return s
def primitive(self):
"""
Divide ``self`` by the GCD of coefficients of ``self``.
Example
=======
>>> from sympy.abc import x, y
>>> (2*x + 4*y).primitive()
(2, x + 2*y)
>>> (2*x/3 + 4*y/9).primitive()
(2/9, 2*y + 3*x)
>>> (2*x/3 + 4.1*y).primitive()
(1, 2*x/3 + 4.1*y)
"""
terms = []
cont = S.Zero
for term in self.args:
coeff = term.as_coeff_mul()[0]
if coeff.is_Rational:
cont = cont.gcd(coeff)
if cont is not S.One:
terms.append(term)
continue
return S.One, self
for i, term in enumerate(terms):
# XXX: this is extremely slow
terms[i] = term/cont
return cont, self._new_rawargs(*terms)
from function import FunctionClass
from mul import Mul
from symbol import Symbol
|
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Segmentation results visualization on a given set of images.
See model.py for more details and usage.
"""
import math
import os.path
import time
import numpy as np
import tensorflow as tf
from deeplab import common
from deeplab import model
from deeplab.datasets import segmentation_dataset
from deeplab.utils import input_generator
from deeplab.utils import save_annotation
slim = tf.contrib.slim
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
# Settings for log directories.
flags.DEFINE_string('vis_logdir', None, 'Where to write the event logs.')
flags.DEFINE_string('checkpoint_dir', None, 'Directory of model checkpoints.')
# Settings for visualizing the model.
flags.DEFINE_integer('vis_batch_size', 1,
'The number of images in each batch during evaluation.')
flags.DEFINE_multi_integer('vis_crop_size', [513, 513],
'Crop size [height, width] for visualization.')
flags.DEFINE_integer('eval_interval_secs', 60 * 5,
'How often (in seconds) to run evaluation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# atrous_rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
'The ratio of input to output spatial resolution.')
# Change to [0.5, 0.75, 1.0, 1.25, 1.5, 1.75] for multi-scale test.
flags.DEFINE_multi_float('eval_scales', [1.0],
'The scales to resize images for evaluation.')
# Change to True for adding flipped images during test.
flags.DEFINE_bool('add_flipped_images', False,
'Add flipped images for evaluation or not.')
# Dataset settings.
flags.DEFINE_string('dataset', 'pascal_voc_seg',
'Name of the segmentation dataset.')
flags.DEFINE_string('vis_split', 'val',
                    'Which split of the dataset is used for visualizing results.')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset resides.')
flags.DEFINE_enum('colormap_type', 'pascal', ['pascal', 'cityscapes'],
'Visualization colormap type.')
flags.DEFINE_boolean('also_save_raw_predictions', False,
'Also save raw predictions.')
flags.DEFINE_integer('max_number_of_iterations', 0,
'Maximum number of visualization iterations. Will loop '
'indefinitely upon nonpositive values.')
# The folder where semantic segmentation predictions are saved.
_SEMANTIC_PREDICTION_SAVE_FOLDER = 'segmentation_results'
# The folder where raw semantic segmentation predictions are saved.
_RAW_SEMANTIC_PREDICTION_SAVE_FOLDER = 'raw_segmentation_results'
# The format to save image.
_IMAGE_FORMAT = '%06d_image'
# The format to save prediction
_PREDICTION_FORMAT = '%06d_prediction'
# To evaluate Cityscapes results on the evaluation server, the labels used
# during training should be mapped to the labels for evaluation.
_CITYSCAPES_TRAIN_ID_TO_EVAL_ID = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 31, 32, 33]
def _convert_train_id_to_eval_id(prediction, train_id_to_eval_id):
"""Converts the predicted label for evaluation.
There are cases where the training labels are not equal to the evaluation
labels. This function is used to perform the conversion so that we could
evaluate the results on the evaluation server.
Args:
prediction: Semantic segmentation prediction.
train_id_to_eval_id: A list mapping from train id to evaluation id.
Returns:
Semantic segmentation prediction whose labels have been changed.
"""
converted_prediction = prediction.copy()
for train_id, eval_id in enumerate(train_id_to_eval_id):
converted_prediction[prediction == train_id] = eval_id
return converted_prediction
def _process_batch(sess, original_images, semantic_predictions, image_names,
image_heights, image_widths, image_id_offset, save_dir,
raw_save_dir, train_id_to_eval_id=None):
"""Evaluates one single batch qualitatively.
Args:
sess: TensorFlow session.
original_images: One batch of original images.
semantic_predictions: One batch of semantic segmentation predictions.
image_names: Image names.
image_heights: Image heights.
image_widths: Image widths.
image_id_offset: Image id offset for indexing images.
save_dir: The directory where the predictions will be saved.
raw_save_dir: The directory where the raw predictions will be saved.
train_id_to_eval_id: A list mapping from train id to eval id.
"""
(original_images,
semantic_predictions,
image_names,
image_heights,
image_widths) = sess.run([original_images, semantic_predictions,
image_names, image_heights, image_widths])
num_image = semantic_predictions.shape[0]
for i in range(num_image):
image_height = np.squeeze(image_heights[i])
image_width = np.squeeze(image_widths[i])
original_image = np.squeeze(original_images[i])
semantic_prediction = np.squeeze(semantic_predictions[i])
crop_semantic_prediction = semantic_prediction[:image_height, :image_width]
# Save image.
save_annotation.save_annotation(
original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),
add_colormap=False)
# Save prediction.
save_annotation.save_annotation(
crop_semantic_prediction, save_dir,
_PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,
colormap_type=FLAGS.colormap_type)
if FLAGS.also_save_raw_predictions:
image_filename = os.path.basename(image_names[i])
if train_id_to_eval_id is not None:
crop_semantic_prediction = _convert_train_id_to_eval_id(
crop_semantic_prediction,
train_id_to_eval_id)
save_annotation.save_annotation(
crop_semantic_prediction, raw_save_dir, image_filename,
add_colormap=False)
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Get dataset-dependent information.
dataset = segmentation_dataset.get_dataset(
FLAGS.dataset, FLAGS.vis_split, dataset_dir=FLAGS.dataset_dir)
train_id_to_eval_id = None
if dataset.name == segmentation_dataset.get_cityscapes_dataset_name():
tf.logging.info('Cityscapes requires converting train_id to eval_id.')
train_id_to_eval_id = _CITYSCAPES_TRAIN_ID_TO_EVAL_ID
# Prepare for visualization.
tf.gfile.MakeDirs(FLAGS.vis_logdir)
save_dir = os.path.join(FLAGS.vis_logdir, _SEMANTIC_PREDICTION_SAVE_FOLDER)
tf.gfile.MakeDirs(save_dir)
raw_save_dir = os.path.join(
FLAGS.vis_logdir, _RAW_SEMANTIC_PREDICTION_SAVE_FOLDER)
tf.gfile.MakeDirs(raw_save_dir)
tf.logging.info('Visualizing on %s set', FLAGS.vis_split)
g = tf.Graph()
with g.as_default():
samples = input_generator.get(dataset,
FLAGS.vis_crop_size,
FLAGS.vis_batch_size,
min_resize_value=FLAGS.min_resize_value,
max_resize_value=FLAGS.max_resize_value,
resize_factor=FLAGS.resize_factor,
dataset_split=FLAGS.vis_split,
is_training=False,
model_variant=FLAGS.model_variant)
model_options = common.ModelOptions(
outputs_to_num_classes={common.OUTPUT_TYPE: dataset.num_classes},
crop_size=FLAGS.vis_crop_size,
atrous_rates=FLAGS.atrous_rates,
output_stride=FLAGS.output_stride)
if tuple(FLAGS.eval_scales) == (1.0,):
tf.logging.info('Performing single-scale test.')
predictions = model.predict_labels(
samples[common.IMAGE],
model_options=model_options,
image_pyramid=FLAGS.image_pyramid)
else:
tf.logging.info('Performing multi-scale test.')
predictions = model.predict_labels_multi_scale(
samples[common.IMAGE],
model_options=model_options,
eval_scales=FLAGS.eval_scales,
add_flipped_images=FLAGS.add_flipped_images)
predictions = predictions[common.OUTPUT_TYPE]
if FLAGS.min_resize_value and FLAGS.max_resize_value:
      # Only support batch_size = 1, since we assume the dimensions of the
      # original image after tf.squeeze are [height, width, 3].
assert FLAGS.vis_batch_size == 1
# Reverse the resizing and padding operations performed in preprocessing.
# First, we slice the valid regions (i.e., remove padded region) and then
      # we resize the predictions back.
original_image = tf.squeeze(samples[common.ORIGINAL_IMAGE])
original_image_shape = tf.shape(original_image)
predictions = tf.slice(
predictions,
[0, 0, 0],
[1, original_image_shape[0], original_image_shape[1]])
resized_shape = tf.to_int32([tf.squeeze(samples[common.HEIGHT]),
tf.squeeze(samples[common.WIDTH])])
predictions = tf.squeeze(
tf.image.resize_images(tf.expand_dims(predictions, 3),
resized_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True), 3)
tf.train.get_or_create_global_step()
saver = tf.train.Saver(slim.get_variables_to_restore())
sv = tf.train.Supervisor(graph=g,
logdir=FLAGS.vis_logdir,
init_op=tf.global_variables_initializer(),
summary_op=None,
summary_writer=None,
global_step=None,
saver=saver)
num_batches = int(math.ceil(
dataset.num_samples / float(FLAGS.vis_batch_size)))
last_checkpoint = None
# Loop to visualize the results when new checkpoint is created.
num_iters = 0
while (FLAGS.max_number_of_iterations <= 0 or
num_iters < FLAGS.max_number_of_iterations):
num_iters += 1
last_checkpoint = slim.evaluation.wait_for_new_checkpoint(
FLAGS.checkpoint_dir, last_checkpoint)
start = time.time()
tf.logging.info(
'Starting visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
tf.logging.info('Visualizing with model %s', last_checkpoint)
with sv.managed_session(FLAGS.master,
start_standard_services=False) as sess:
sv.start_queue_runners(sess)
sv.saver.restore(sess, last_checkpoint)
image_id_offset = 0
for batch in range(num_batches):
tf.logging.info('Visualizing batch %d / %d', batch + 1, num_batches)
_process_batch(sess=sess,
original_images=samples[common.ORIGINAL_IMAGE],
semantic_predictions=predictions,
image_names=samples[common.IMAGE_NAME],
image_heights=samples[common.HEIGHT],
image_widths=samples[common.WIDTH],
image_id_offset=image_id_offset,
save_dir=save_dir,
raw_save_dir=raw_save_dir,
train_id_to_eval_id=train_id_to_eval_id)
image_id_offset += FLAGS.vis_batch_size
tf.logging.info(
'Finished visualization at ' + time.strftime('%Y-%m-%d-%H:%M:%S',
time.gmtime()))
time_to_next_eval = start + FLAGS.eval_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
if __name__ == '__main__':
flags.mark_flag_as_required('checkpoint_dir')
flags.mark_flag_as_required('vis_logdir')
flags.mark_flag_as_required('dataset_dir')
tf.app.run()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpointing the CacheDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class CacheDatasetCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def setUp(self):
self.range_size = 10
self.num_repeats = 3
self.num_outputs = self.range_size * self.num_repeats
self.cache_file_prefix = 'test'
def make_dataset_fn(self, is_memory):
if is_memory:
filename = ''
else:
filename = os.path.join(self.get_temp_dir(), self.cache_file_prefix)
def ds_fn():
return dataset_ops.Dataset.range(self.range_size).cache(filename).repeat(
self.num_repeats)
return ds_fn
def expected_outputs(self):
return list(range(self.range_size)) * self.num_repeats
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointBeforeOneEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 5 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointBeforeOneEpochThenRunFewSteps(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 8 entries from iterator but save checkpoint after producing 5.
outputs = self.gen_outputs(
ds_fn, [5], 8, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, range(8))
outputs = outputs[:5]
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointAfterOneEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 15 entries from iterator and save checkpoint.
outputs = self.gen_outputs(ds_fn, [], 15, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(5)))
# Restore from checkpoint and produce the rest of the elements from the
# iterator.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 15,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, self.expected_outputs())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointAfterOneEpochThenRunFewSteps(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 18 entries from iterator but save checkpoint after producing 15.
outputs = self.gen_outputs(
ds_fn, [15], 18, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(8)))
outputs = list(range(10)) + list(range(5)) + self.gen_outputs(
ds_fn, [],
self.num_outputs - 15,
ckpt_saved=True,
verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointBeforeOneEpochButRunCompleteEpoch(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Generate 13 entries from iterator but save checkpoint after producing 5.
outputs = self.gen_outputs(
ds_fn, [5], 13, verify_exhausted=False, save_checkpoint_at_end=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(3)))
# Since we ran for more than one epoch, the cache was completely written.
# The ckpt was saved when the iterator was in cache-write mode. Test that
# the iterator falls back to read mode after restoring if the cache has
# been completely written.
outputs = list(range(5)) + self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointUnusedWriterIterator(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Checkpoint before get_next is called even once.
outputs = self.gen_outputs(ds_fn, [], 0, verify_exhausted=False)
self.assertSequenceEqual(outputs, [])
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, ckpt_saved=True, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testCheckpointUnusedMidwayWriterIterator(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 5 elements and checkpoint.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint, then produce no elements and checkpoint.
outputs.extend(
self.gen_outputs(ds_fn, [], 0, ckpt_saved=True, verify_exhausted=False))
self.assertSequenceEqual(outputs, range(5))
# Restore from checkpoint and produce rest of the elements.
outputs.extend(
self.gen_outputs(
ds_fn, [],
self.num_outputs - 5,
ckpt_saved=True,
verify_exhausted=False))
self.assertSequenceEqual(outputs, list(range(10)) * 3)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testUnusedCheckpointError(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 5 elements and save ckpt.
outputs = self.gen_outputs(ds_fn, [], 5, verify_exhausted=False)
self.assertSequenceEqual(outputs, range(5))
if is_memory:
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
self.assertSequenceEqual(outputs, self.expected_outputs())
else:
# Since the complete cache has not been written, a new iterator which does
# not restore the checkpoint will throw an error since there is a partial
# cache shard.
with self.assertRaises(errors.AlreadyExistsError):
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(is_memory=[True, False])))
def testIgnoreCheckpointIfCacheWritten(self, is_memory):
ds_fn = self.make_dataset_fn(is_memory)
# Produce 15 elements and save ckpt. This will write the complete cache.
outputs = self.gen_outputs(ds_fn, [], 15, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) + list(range(5)))
# Build the iterator again but do not restore from ckpt. Since the cache
# has already been written we should be able to use it.
outputs = self.gen_outputs(
ds_fn, [], self.num_outputs, verify_exhausted=False)
self.assertSequenceEqual(outputs, list(range(10)) * 3)
if __name__ == '__main__':
test.main()
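# --- Hedged illustration (not part of the original test) -------------------
# A minimal sketch of the dataset shape these tests exercise:
# range(10).cache(filename).repeat(3) yields 0..9 three times, and with a
# non-empty filename the first full pass writes an on-disk cache that later
# iterations read back. This uses the public eager API rather than the
# kernel-test harness above, and is illustrative only.
def _sketch_cached_range(cache_to_file=True):
  import tempfile
  filename = ''
  if cache_to_file:
    filename = os.path.join(tempfile.mkdtemp(), 'test')
  ds = dataset_ops.Dataset.range(10).cache(filename).repeat(3)
  return [int(x) for x in ds.as_numpy_iterator()]
# Expected: list(range(10)) * 3, matching expected_outputs() above.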
|
|
# -*- coding: utf-8 -*-
from django.conf.urls import include, url
from . import views
from .models import (NrsimhaTile, GoldenBrick, GuruParamparaBrick,
RadhaMadhavaBrick, SilverCoin, GadadharCoin, AdvaitaCoin,
GoldCoin, PlatinumCoin, RadharaniCoin,
SquareFeet, SquareMeter, Trustee, GeneralDonation)
from .forms import (NrsimhaTileForm, GoldenBrickForm, GuruParamparaBrickForm,
RadhaMadhavaBrickForm, SilverCoinForm, GadadharCoinForm,
AdvaitaCoinForm, GoldCoinForm, PlatinumCoinForm,
RadharaniCoinForm, SquareFeetForm, SquareMeterForm,
TrusteeForm, GeneralDonationForm)
urlpatterns = [
url(r'^nrsimha-tile/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.BrickCreateView.as_view(
model=NrsimhaTile,
form_class=NrsimhaTileForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.BrickDetailView.as_view(
model=NrsimhaTile,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.BrickUpdateView.as_view(
model=NrsimhaTile,
form_class=NrsimhaTileForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=NrsimhaTile,
),
name='delete'
),
], namespace="nrsimha-tile")),
url(r'^golden-brick/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.BrickCreateView.as_view(
model=GoldenBrick,
form_class=GoldenBrickForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.BrickDetailView.as_view(
model=GoldenBrick,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.BrickUpdateView.as_view(
model=GoldenBrick,
form_class=GoldenBrickForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GoldenBrick,
),
name='delete'
),
], namespace="golden-brick")),
url(r'^guru-parampara-brick/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.BrickCreateView.as_view(
model=GuruParamparaBrick,
form_class=GuruParamparaBrickForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.BrickDetailView.as_view(
model=GuruParamparaBrick,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.BrickUpdateView.as_view(
model=GuruParamparaBrick,
form_class=GuruParamparaBrickForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GuruParamparaBrick,
),
name='delete'
),
], namespace="guru-parampara-brick")),
url(r'^radha-madhava-brick/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.BrickCreateView.as_view(
model=RadhaMadhavaBrick,
form_class=RadhaMadhavaBrickForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.BrickDetailView.as_view(
model=RadhaMadhavaBrick,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.BrickUpdateView.as_view(
model=RadhaMadhavaBrick,
form_class=RadhaMadhavaBrickForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=RadhaMadhavaBrick,
),
name='delete'
),
], namespace="radha-madhava-brick")),
url(r'^srivas-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=SilverCoin,
form_class=SilverCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=SilverCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=SilverCoin,
form_class=SilverCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=SilverCoin,
),
name='delete'
),
], namespace="srivas-coin")),
url(r'^gadadhar-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=GadadharCoin,
form_class=GadadharCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=GadadharCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=GadadharCoin,
form_class=GadadharCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GadadharCoin,
),
name='delete'
),
], namespace="gadadhar-coin")),
url(r'^advaita-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=AdvaitaCoin,
form_class=AdvaitaCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=AdvaitaCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=AdvaitaCoin,
form_class=AdvaitaCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=AdvaitaCoin,
),
name='delete'
),
], namespace="advaita-coin")),
url(r'^gold-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=GoldCoin,
form_class=GoldCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=GoldCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=GoldCoin,
form_class=GoldCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GoldCoin,
),
name='delete'
),
], namespace="nityananda-coin")),
url(r'^platinum-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=PlatinumCoin,
form_class=PlatinumCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=PlatinumCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=PlatinumCoin,
form_class=PlatinumCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=PlatinumCoin,
),
name='delete'
),
], namespace="caitanya-coin")),
url(r'^radharani-coin/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.CoinCreateView.as_view(
model=RadharaniCoin,
form_class=RadharaniCoinForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.CoinDetailView.as_view(
model=RadharaniCoin,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.CoinUpdateView.as_view(
model=RadharaniCoin,
form_class=RadharaniCoinForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=RadharaniCoin,
),
name='delete'
),
], namespace="radharani-coin")),
url(r'^square-feet/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.FeetCreateView.as_view(
model=SquareFeet,
form_class=SquareFeetForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.FeetDetailView.as_view(
model=SquareFeet,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.FeetUpdateView.as_view(
model=SquareFeet,
form_class=SquareFeetForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=SquareFeet,
),
name='delete'
),
], namespace="square-feet")),
url(r'^square-meter/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.FeetCreateView.as_view(
model=SquareMeter,
form_class=SquareMeterForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.FeetDetailView.as_view(
model=SquareMeter,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.FeetUpdateView.as_view(
model=SquareMeter,
form_class=SquareMeterForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=SquareMeter,
),
name='delete'
),
], namespace="square-meter")),
url(r'^trustee/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.TrusteeCreateView.as_view(
model=Trustee,
form_class=TrusteeForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.TrusteeDetailView.as_view(
model=Trustee,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.TrusteeUpdateView.as_view(
model=Trustee,
form_class=TrusteeForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=Trustee,
),
name='delete'
),
], namespace="trustee")),
url(r'^general-donation/', include(
[
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
view=views.GeneralDonationCreateView.as_view(
model=GeneralDonation,
form_class=GeneralDonationForm,
),
name='create'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
view=views.GeneralDonationDetailView.as_view(
model=GeneralDonation,
),
name='detail'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
view=views.GeneralDonationUpdateView.as_view(
model=GeneralDonation,
form_class=GeneralDonationForm,
),
name='update'
),
url(
regex=r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
view=views.PromotionDeleteView.as_view(
model=GeneralDonation,
),
name='delete'
),
], namespace="general-donation")),
]
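# --- Hedged illustration (not part of the original urlconf) ----------------
# Every include() above repeats the same four routes (create/detail/update/
# delete). One possible way to factor out that repetition, reusing the
# imports at the top of this module; the helper name and default view
# classes below are illustrative assumptions, not existing project code.
def _promotion_urls(model, form_class,
                    create_view=views.BrickCreateView,
                    detail_view=views.BrickDetailView,
                    update_view=views.BrickUpdateView,
                    delete_view=views.PromotionDeleteView):
    return [
        url(r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/create/$',
            create_view.as_view(model=model, form_class=form_class),
            name='create'),
        url(r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/$',
            detail_view.as_view(model=model),
            name='detail'),
        url(r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/update/$',
            update_view.as_view(model=model, form_class=form_class),
            name='update'),
        url(r'^(?P<person_id>\d+)/(?P<pledge_id>\d+)/(?P<pk>\d+)/delete/$',
            delete_view.as_view(model=model),
            name='delete'),
    ]
# e.g. url(r'^nrsimha-tile/', include(_promotion_urls(NrsimhaTile,
#      NrsimhaTileForm), namespace="nrsimha-tile"))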
|
|
import logging
from collections import defaultdict
import ailment
from .. import Analysis
from ..calling_convention import CallingConventionAnalysis
from ..code_location import CodeLocation
from ..forward_analysis import ForwardAnalysis, FunctionGraphVisitor
from ...engines.light import SpOffset, SimEngineLightVEX, SimEngineLightAIL
from ...errors import SimEngineError
from ...keyed_region import KeyedRegion
from ...knowledge_plugins import Function
from ...sim_variable import SimStackVariable, SimRegisterVariable
l = logging.getLogger("angr.analyses.variable_recovery.variable_recovery_fast")
class ProcessorState(object):
__slots__ = ['_arch', 'sp_adjusted', 'sp_adjustment', 'bp_as_base', 'bp']
def __init__(self, arch):
self._arch = arch
# whether we have met the initial stack pointer adjustment
self.sp_adjusted = None
# how many bytes are subtracted from the stack pointer
self.sp_adjustment = arch.bits / 8 if arch.call_pushes_ret else 0
# whether the base pointer is used as the stack base of the stack frame or not
self.bp_as_base = None
# content of the base pointer
self.bp = None
def copy(self):
s = ProcessorState(self._arch)
s.sp_adjusted = self.sp_adjusted
s.sp_adjustment = self.sp_adjustment
s.bp_as_base = self.bp_as_base
s.bp = self.bp
return s
def merge(self, other):
if not self == other:
            l.warning("Inconsistent merge: %s %s", self, other)
# FIXME: none of the following logic makes any sense...
if other.sp_adjusted is True:
self.sp_adjusted = True
self.sp_adjustment = max(self.sp_adjustment, other.sp_adjustment)
if other.bp_as_base is True:
self.bp_as_base = True
self.bp = max(self.bp, other.bp)
return self
def __eq__(self, other):
if not isinstance(other, ProcessorState):
return False
return (self.sp_adjusted == other.sp_adjusted and
self.sp_adjustment == other.sp_adjustment and
self.bp == other.bp and
self.bp_as_base == other.bp_as_base)
def __repr__(self):
return "<ProcessorState %s%#x%s %s>" % (self.bp, self.sp_adjustment,
" adjusted" if self.sp_adjusted else "", self.bp_as_base)
def get_engine(base_engine):
class SimEngineVR(base_engine):
def __init__(self):
super(SimEngineVR, self).__init__()
self.processor_state = None
self.variable_manager = None
@property
def func_addr(self):
if self.state is None:
return None
return self.state.function.addr
def process(self, state, *args, **kwargs): # pylint:disable=unused-argument
            # We are using a completely different state, so we call our own _process()
            # method directly until SimEngine becomes flexible enough to support this.
try:
self._process(state, None, block=kwargs.pop('block', None))
except SimEngineError as e:
if kwargs.pop('fail_fast', False) is True:
raise e
def _process(self, state, successors, block=None, func_addr=None): # pylint:disable=unused-argument
self.processor_state = state.processor_state
self.variable_manager = state.variable_manager
super(SimEngineVR, self)._process(state, successors, block=block)
#
# VEX
#
# Statement handlers
def _handle_Put(self, stmt):
offset = stmt.offset
data = self._expr(stmt.data)
size = stmt.data.result_size(self.tyenv) / 8
self._assign_to_register(offset, data, size)
def _handle_Store(self, stmt):
addr = self._expr(stmt.addr)
size = stmt.data.result_size(self.tyenv) / 8
data = self._expr(stmt.data)
self._store(addr, data, size)
# Expression handlers
def _handle_Get(self, expr):
reg_offset = expr.offset
reg_size = expr.result_size(self.tyenv) / 8
return self._read_from_register(reg_offset, reg_size)
def _handle_Load(self, expr):
addr = self._expr(expr.addr)
size = expr.result_size(self.tyenv) / 8
return self._load(addr, size)
#
# AIL
#
# Statement handlers
def _ail_handle_Assignment(self, stmt):
dst_type = type(stmt.dst)
if dst_type is ailment.Expr.Register:
offset = stmt.dst.reg_offset
data = self._expr(stmt.src)
size = stmt.src.bits / 8
self._assign_to_register(offset, data, size)
elif dst_type is ailment.Expr.Tmp:
# simply write to self.tmps
data = self._expr(stmt.src)
if data is None:
return
self.tmps[stmt.dst.tmp_idx] = data
else:
l.warning('Unsupported dst type %s.', dst_type)
def _ail_handle_Store(self, stmt):
addr = self._expr(stmt.addr)
data = self._expr(stmt.data)
size = stmt.data.bits / 8
self._store(addr, data, size)
def _ail_handle_Jump(self, stmt):
pass
def _ail_handle_ConditionalJump(self, stmt):
pass
def _ail_handle_Call(self, stmt):
pass
# Expression handlers
def _ail_handle_Register(self, expr):
offset = expr.reg_offset
size = expr.bits / 8
return self._read_from_register(offset, size)
def _ail_handle_Load(self, expr):
addr = self._expr(expr.addr)
size = expr.size
return self._load(addr, size)
#
# Logic
#
def _assign_to_register(self, offset, data, size):
"""
:param int offset:
:param data:
:param int size:
:return:
"""
codeloc = self._codeloc()
if offset == self.arch.sp_offset:
if type(data) is SpOffset:
sp_offset = data.offset
self.processor_state.sp_adjusted = True
self.processor_state.sp_adjustment = sp_offset
l.debug('Adjusting stack pointer at %#x with offset %+#x.', self.ins_addr, sp_offset)
return
if offset == self.arch.bp_offset:
if data is not None:
self.processor_state.bp = data
else:
self.processor_state.bp = None
return
# handle register writes
if type(data) is SpOffset:
# lea
stack_offset = data.offset
existing_vars = self.variable_manager[self.func_addr].find_variables_by_stmt(self.block.addr,
self.stmt_idx,
'memory')
if not existing_vars:
# TODO: how to determine the size for a lea?
existing_vars = self.state.stack_region.get_variables_by_offset(stack_offset)
if not existing_vars:
size = 1
variable = SimStackVariable(stack_offset, size, base='bp',
ident=self.variable_manager[self.func_addr].next_variable_ident(
'stack'),
region=self.func_addr,
)
self.variable_manager[self.func_addr].add_variable('stack', stack_offset, variable)
l.debug('Identified a new stack variable %s at %#x.', variable, self.ins_addr)
else:
variable = next(iter(existing_vars))
else:
variable, _ = existing_vars[0]
self.state.stack_region.add_variable(stack_offset, variable)
base_offset = self.state.stack_region.get_base_addr(stack_offset)
for var in self.state.stack_region.get_variables_by_offset(base_offset):
self.variable_manager[self.func_addr].reference_at(var, stack_offset - base_offset, codeloc)
else:
pass
# register writes
existing_vars = self.variable_manager[self.func_addr].find_variables_by_stmt(self.block.addr, self.stmt_idx,
'register'
)
if not existing_vars:
variable = SimRegisterVariable(offset, size,
ident=self.variable_manager[self.func_addr].next_variable_ident(
'register'),
region=self.func_addr
)
self.variable_manager[self.func_addr].add_variable('register', offset, variable)
else:
variable, _ = existing_vars[0]
self.state.register_region.set_variable(offset, variable)
self.variable_manager[self.func_addr].write_to(variable, 0, codeloc)
def _store(self, addr, data, size): # pylint:disable=unused-argument
"""
:param addr:
:param data:
:param int size:
:return:
"""
if type(addr) is SpOffset:
# Storing data to stack
stack_offset = addr.offset
existing_vars = self.variable_manager[self.func_addr].find_variables_by_stmt(self.block.addr, self.stmt_idx,
'memory')
if not existing_vars:
variable = SimStackVariable(stack_offset, size, base='bp',
ident=self.variable_manager[self.func_addr].next_variable_ident('stack'),
region=self.func_addr,
)
self.variable_manager[self.func_addr].add_variable('stack', stack_offset, variable)
l.debug('Identified a new stack variable %s at %#x.', variable, self.ins_addr)
else:
variable, _ = existing_vars[0]
self.state.stack_region.set_variable(stack_offset, variable)
base_offset = self.state.stack_region.get_base_addr(stack_offset)
codeloc = CodeLocation(self.block.addr, self.stmt_idx, ins_addr=self.ins_addr)
for var in self.state.stack_region.get_variables_by_offset(stack_offset):
self.variable_manager[self.func_addr].write_to(var,
stack_offset - base_offset,
codeloc
)
def _load(self, addr, size):
"""
:param addr:
:param size:
:return:
"""
if type(addr) is SpOffset:
# Loading data from stack
stack_offset = addr.offset
if stack_offset not in self.state.stack_region:
variable = SimStackVariable(stack_offset, size, base='bp',
ident=self.variable_manager[self.func_addr].next_variable_ident('stack'),
region=self.func_addr,
)
self.state.stack_region.add_variable(stack_offset, variable)
self.variable_manager[self.func_addr].add_variable('stack', stack_offset, variable)
l.debug('Identified a new stack variable %s at %#x.', variable, self.ins_addr)
base_offset = self.state.stack_region.get_base_addr(stack_offset)
codeloc = CodeLocation(self.block.addr, self.stmt_idx, ins_addr=self.ins_addr)
all_vars = self.state.stack_region.get_variables_by_offset(base_offset)
assert len(all_vars) == 1 # we enabled phi nodes
var = next(iter(all_vars))
self.variable_manager[self.func_addr].read_from(var,
stack_offset - base_offset,
codeloc,
# overwrite=True
)
def _read_from_register(self, offset, size):
"""
:param offset:
:param size:
:return:
"""
codeloc = self._codeloc()
if offset == self.arch.sp_offset:
# loading from stack pointer
return SpOffset(self.arch.bits, self.processor_state.sp_adjustment, is_base=False)
elif offset == self.arch.bp_offset:
return self.processor_state.bp
if offset not in self.state.register_region:
variable = SimRegisterVariable(offset, size,
ident=self.variable_manager[self.func_addr].next_variable_ident('register'),
region=self.func_addr,
)
self.state.register_region.add_variable(offset, variable)
self.variable_manager[self.func_addr].add_variable('register', offset, variable)
for var in self.state.register_region.get_variables_by_offset(offset):
self.variable_manager[self.func_addr].read_from(var, 0, codeloc)
return None
return SimEngineVR
class VariableRecoveryFastState(object):
"""
The abstract state of variable recovery analysis.
"""
def __init__(self, variable_manager, arch, func, stack_region=None, register_region=None, processor_state=None,
make_phi=None):
self.variable_manager = variable_manager
self.arch = arch
self.function = func
self._make_phi = make_phi
if stack_region is not None:
self.stack_region = stack_region
else:
self.stack_region = KeyedRegion()
if register_region is not None:
self.register_region = register_region
else:
self.register_region = KeyedRegion()
self.processor_state = ProcessorState(self.arch) if processor_state is None else processor_state
def __repr__(self):
return "<VRAbstractState: %d register variables, %d stack variables>" % (len(self.register_region), len(self.stack_region))
def __eq__(self, other):
if type(other) is not VariableRecoveryFastState:
return False
return self.stack_region == other.stack_region and self.register_region == other.register_region
def copy(self):
state = VariableRecoveryFastState(
self.variable_manager,
self.arch,
self.function,
stack_region=self.stack_region.copy(),
register_region=self.register_region.copy(),
processor_state=self.processor_state.copy(),
make_phi=self._make_phi,
)
return state
def merge(self, other, successor=None):
"""
Merge two abstract states.
:param VariableRecoveryState other: The other abstract state to merge.
:return: The merged abstract state.
:rtype: VariableRecoveryState
"""
def _make_phi(*variables):
return self._make_phi(successor, *variables)
merged_stack_region = self.stack_region.copy().merge(other.stack_region, make_phi_func=_make_phi)
merged_register_region = self.register_region.copy().merge(other.register_region, make_phi_func=_make_phi)
state = VariableRecoveryFastState(
self.variable_manager,
self.arch,
self.function,
stack_region=merged_stack_region,
register_region=merged_register_region,
processor_state=self.processor_state.copy().merge(other.processor_state),
make_phi=self._make_phi,
)
return state
#
# Util methods
#
def _normalize_register_offset(self, offset): #pylint:disable=no-self-use
# TODO:
return offset
def _to_signed(self, n):
if n >= 2 ** (self.arch.bits - 1):
# convert it to a negative number
return n - 2 ** self.arch.bits
return n
class VariableRecoveryFast(ForwardAnalysis, Analysis): #pylint:disable=abstract-method
"""
Recover "variables" from a function by keeping track of stack pointer offsets and pattern matching VEX statements.
"""
def __init__(self, func, max_iterations=3, clinic=None):
"""
:param knowledge.Function func: The function to analyze.
:param int max_iterations:
:param clinic:
"""
function_graph_visitor = FunctionGraphVisitor(func)
ForwardAnalysis.__init__(self, order_jobs=True, allow_merging=True, allow_widening=False,
graph_visitor=function_graph_visitor)
self.function = func
self._node_to_state = { }
self._node_to_input_state = { }
self.variable_manager = self.kb.variables
self._max_iterations = max_iterations
self._clinic = clinic
self._ail_engine = get_engine(SimEngineLightAIL)()
self._vex_engine = get_engine(SimEngineLightVEX)()
self._node_iterations = defaultdict(int)
# phi nodes dict
self._cached_phi_nodes = { }
self._node_to_cc = { }
self._analyze()
#
# Main analysis routines
#
def _pre_analysis(self):
CallingConventionAnalysis.recover_calling_conventions(self.project)
# initialize node_to_cc map
function_nodes = [n for n in self.function.transition_graph.nodes() if isinstance(n, Function)]
for func_node in function_nodes:
for callsite_node in self.function.transition_graph.predecessors(func_node):
self._node_to_cc[callsite_node.addr] = func_node.calling_convention
def _pre_job_handling(self, job):
pass
def _initial_abstract_state(self, node):
# annotate the stack pointer
# concrete_state.regs.sp = concrete_state.regs.sp.annotate(StackLocationAnnotation(8))
# give it enough stack space
# concrete_state.regs.bp = concrete_state.regs.sp + 0x100000
state = VariableRecoveryFastState(self.variable_manager, self.project.arch, self.function,
make_phi=self._make_phi_node
)
# put a return address on the stack if necessary
if self.project.arch.call_pushes_ret:
ret_addr_offset = self.project.arch.bits / 8
ret_addr_var = SimStackVariable(ret_addr_offset, self.project.arch.bits / 8, base='bp', name='ret_addr',
region=self.function.addr, category='return_address',
)
state.stack_region.add_variable(ret_addr_offset, ret_addr_var)
return state
def _merge_states(self, node, *states):
return states[0].merge(states[1], successor=node.addr)
def _run_on_node(self, node, state):
"""
:param angr.Block node:
:param VariableRecoveryState state:
:return:
"""
input_state = state # make it more meaningful
if self._clinic:
# AIL mode
block = self._clinic.block(node.addr, node.size)
else:
# VEX mode
block = self.project.factory.block(node.addr, node.size, opt_level=0)
if node.addr in self._node_to_input_state:
prev_state = self._node_to_input_state[node.addr]
#if node.addr == 0x804824f:
# print "###\n### STACK\n###"
# print input_state.stack_region.dbg_repr()
# print ""
# print prev_state.stack_region.dbg_repr()
# print "###\nREGISTER\n###"
# print input_state.register_region.dbg_repr()
# print ""
# print prev_state.register_region.dbg_repr()
# # import ipdb; ipdb.set_trace()
if input_state == prev_state:
l.debug('Skip node %#x as we have reached a fixed-point', node.addr)
return False, input_state
else:
l.debug('Merging input state of node %#x with the previous state.', node.addr)
input_state = prev_state.merge(input_state, successor=node.addr)
state = input_state.copy()
self._node_to_input_state[node.addr] = input_state
if self._node_iterations[node.addr] >= self._max_iterations:
l.debug('Skip node %#x as we have iterated %d times on it.', node.addr, self._node_iterations[node.addr])
return False, state
self._process_block(state, block)
self._node_to_state[node.addr] = state
self._node_iterations[node.addr] += 1
return True, state
def _intra_analysis(self):
pass
def _post_analysis(self):
self.variable_manager.initialize_variable_names()
for addr, state in self._node_to_state.iteritems():
self.variable_manager[self.function.addr].set_live_variables(addr,
state.register_region,
state.stack_region
)
#
# Private methods
#
def _process_block(self, state, block): #pylint:disable=no-self-use
"""
Scan through all statements and perform the following tasks:
- Find stack pointers and the VEX temporary variable storing stack pointers
- Selectively calculate VEX statements
- Track memory loading and mark stack and global variables accordingly
:param angr.Block block:
:return:
"""
l.debug('Processing block %#x.', block.addr)
processor = self._ail_engine if isinstance(block, ailment.Block) else self._vex_engine
processor.process(state, block=block, fail_fast=self._fail_fast)
# readjusting sp at the end for blocks that end in a call
if block.addr in self._node_to_cc:
cc = self._node_to_cc[block.addr]
state.processor_state.sp_adjustment += cc.sp_delta
state.processor_state.sp_adjusted = True
l.debug('Adjusting stack pointer at end of block %#x with offset %+#x.', block.addr, state.processor_state.sp_adjustment)
def _make_phi_node(self, block_addr, *variables):
key = tuple(sorted(variables, key=lambda v: v.ident))
if block_addr not in self._cached_phi_nodes:
self._cached_phi_nodes[block_addr] = { }
if key in self._cached_phi_nodes[block_addr]:
return self._cached_phi_nodes[block_addr][key]
phi_node = self.variable_manager[self.function.addr].make_phi_node(*variables)
self._cached_phi_nodes[block_addr][key] = phi_node
return phi_node
from angr.analyses import AnalysesHub
AnalysesHub.register_default('VariableRecoveryFast', VariableRecoveryFast)
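# --- Hedged usage sketch (not part of the original module) -----------------
# A minimal illustration of how the analysis registered above is typically
# driven. The binary path and function name are placeholders, and CFGFast is
# run first only to populate the knowledge base with functions.
def _sketch_run_variable_recovery(binary_path, func_name='main'):
    import angr  # local import; illustrative only
    proj = angr.Project(binary_path, auto_load_libs=False)
    proj.analyses.CFGFast()  # fills proj.kb.functions
    func = proj.kb.functions[func_name]
    vr = proj.analyses.VariableRecoveryFast(func)  # analysis runs on init
    return vr.variable_manager[func.addr]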
|
|
"""
Internal tasks are tasks that are started from the teuthology infrastructure.
Note that there is no corresponding task defined for this module. All of
the calls are made from other modules, most notably teuthology/run.py
"""
import contextlib
import functools
import gzip
import logging
import os
import shutil
import time
import yaml
import subprocess
import humanfriendly
import teuthology.lock.ops
from teuthology import misc
from teuthology.packaging import get_builder_project
from teuthology import report
from teuthology.config import config as teuth_config
from teuthology.exceptions import ConfigError, VersionNotFoundError
from teuthology.job_status import get_status, set_status
from teuthology.orchestra import cluster, remote, run
# the import below is marked noqa to work around run.py, which does not support multilevel submodule imports
from teuthology.task.internal.redhat import setup_cdn_repo, setup_base_repo, setup_additional_repo, setup_stage_cdn # noqa
log = logging.getLogger(__name__)
@contextlib.contextmanager
def base(ctx, config):
"""
Create the test directory that we will be using on the remote system
"""
log.info('Creating test directory...')
testdir = misc.get_testdir(ctx)
run.wait(
ctx.cluster.run(
args=['mkdir', '-p', '-m0755', '--', testdir],
wait=False,
)
)
try:
yield
finally:
log.info('Tidying up after the test...')
# if this fails, one of the earlier cleanups is flawed; don't
# just cram an rm -rf here
run.wait(
ctx.cluster.run(
args=['find', testdir, '-ls',
run.Raw(';'),
'rmdir', '--', testdir],
wait=False,
),
)
def save_config(ctx, config):
"""
Store the config in a yaml file
"""
log.info('Saving configuration')
if ctx.archive is not None:
with open(os.path.join(ctx.archive, 'config.yaml'), 'w') as f:
yaml.safe_dump(ctx.config, f, default_flow_style=False)
def check_packages(ctx, config):
"""
Checks gitbuilder to determine if there are missing packages for this job.
If there are missing packages, fail the job.
"""
for task in ctx.config['tasks']:
if list(task.keys())[0] == 'buildpackages':
log.info("Checking packages skipped because "
"the task buildpackages was found.")
return
log.info("Checking packages...")
os_type = ctx.config.get("os_type")
sha1 = ctx.config.get("sha1")
# We can only do this check if there are a defined sha1 and os_type
# in the job config.
if os_type and sha1:
package = get_builder_project()("ceph", ctx.config)
template = "Checking packages for os_type '{os}', " \
"flavor '{flav}' and ceph hash '{ver}'"
log.info(
template.format(
os=package.os_type,
flav=package.flavor,
ver=package.sha1,
)
)
if package.version:
log.info("Found packages for ceph version {ver}".format(
ver=package.version
))
else:
msg = "Packages for distro '{d}' and ceph hash '{ver}' not found"
msg = msg.format(
d=package.distro,
ver=package.sha1,
)
log.error(msg)
# set the failure message and update paddles with the status
ctx.summary["failure_reason"] = msg
set_status(ctx.summary, "dead")
report.try_push_job_info(ctx.config, dict(status='dead'))
raise VersionNotFoundError(package.base_url)
else:
log.info(
"Checking packages skipped, missing os_type '{os}' or ceph hash '{ver}'".format(
os=os_type,
ver=sha1,
)
)
@contextlib.contextmanager
def timer(ctx, config):
"""
Start the timer used by teuthology
"""
log.info('Starting timer...')
start = time.time()
try:
yield
finally:
duration = time.time() - start
log.info('Duration was %f seconds', duration)
ctx.summary['duration'] = duration
def add_remotes(ctx, config):
"""
Create a ctx.cluster object populated with remotes mapped to roles
"""
ctx.cluster = cluster.Cluster()
# Allow jobs to run without using nodes, for self-testing
if 'roles' not in ctx.config and 'targets' not in ctx.config:
return
remotes = []
machs = []
for name in ctx.config['targets'].keys():
machs.append(name)
for t, key in ctx.config['targets'].items():
t = misc.canonicalize_hostname(t)
try:
if ctx.config['sshkeys'] == 'ignore':
key = None
except (AttributeError, KeyError):
pass
rem = remote.Remote(name=t, host_key=key, keep_alive=True)
remotes.append(rem)
if 'roles' in ctx.config:
for rem, roles in zip(remotes, ctx.config['roles']):
assert all(isinstance(role, str) for role in roles), \
"Roles in config must be strings: %r" % roles
ctx.cluster.add(rem, roles)
log.info('roles: %s - %s' % (rem, roles))
else:
for rem in remotes:
ctx.cluster.add(rem, rem.name)
def connect(ctx, config):
"""
Connect to all remotes in ctx.cluster
"""
log.info('Opening connections...')
for rem in ctx.cluster.remotes.keys():
log.debug('connecting to %s', rem.name)
rem.connect()
def push_inventory(ctx, config):
if not teuth_config.lock_server:
return
def push():
for rem in ctx.cluster.remotes.keys():
info = rem.inventory_info
teuthology.lock.ops.update_inventory(info)
try:
push()
except Exception:
log.exception("Error pushing inventory")
BUILDPACKAGES_FIRST = 0
BUILDPACKAGES_OK = 1
BUILDPACKAGES_REMOVED = 2
BUILDPACKAGES_NOTHING = 3
def buildpackages_prep(ctx, config):
"""
Make sure the 'buildpackages' task happens before
the 'install' task.
Return:
BUILDPACKAGES_NOTHING if there is no buildpackages task
BUILDPACKAGES_REMOVED if there is a buildpackages task but no install task
BUILDPACKAGES_FIRST if a buildpackages task was moved at the beginning
BUILDPACKAGES_OK if a buildpackages task already at the beginning
"""
index = 0
install_index = None
buildpackages_index = None
buildpackages_prep_index = None
for task in ctx.config['tasks']:
t = list(task)[0]
if t == 'install':
install_index = index
if t == 'buildpackages':
buildpackages_index = index
if t == 'internal.buildpackages_prep':
buildpackages_prep_index = index
index += 1
if (buildpackages_index is not None and
install_index is not None):
if buildpackages_index > buildpackages_prep_index + 1:
log.info('buildpackages moved to be the first task')
buildpackages = ctx.config['tasks'].pop(buildpackages_index)
ctx.config['tasks'].insert(buildpackages_prep_index + 1,
buildpackages)
return BUILDPACKAGES_FIRST
else:
log.info('buildpackages is already the first task')
return BUILDPACKAGES_OK
elif buildpackages_index is not None and install_index is None:
ctx.config['tasks'].pop(buildpackages_index)
all_tasks = [list(x.keys())[0] for x in ctx.config['tasks']]
log.info('buildpackages removed because no install task found in ' +
str(all_tasks))
return BUILDPACKAGES_REMOVED
elif buildpackages_index is None:
log.info('no buildpackages task found')
return BUILDPACKAGES_NOTHING
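# --- Hedged illustration (not part of the original module) -----------------
# A small sketch of the reordering buildpackages_prep() performs, run against
# a made-up ctx whose only relevant attribute is config['tasks'].
def _sketch_buildpackages_prep_reorder():
    from types import SimpleNamespace
    ctx = SimpleNamespace(config={'tasks': [
        {'internal.buildpackages_prep': None},
        {'install': None},
        {'buildpackages': None},
    ]})
    result = buildpackages_prep(ctx, config=None)
    # result == BUILDPACKAGES_FIRST; 'buildpackages' now precedes 'install'.
    return result, [list(t)[0] for t in ctx.config['tasks']]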
def serialize_remote_roles(ctx, config):
"""
Provides an explicit mapping for which remotes have been assigned what roles
So that other software can be loosely coupled to teuthology
"""
if ctx.archive is not None:
with open(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
info_yaml = yaml.safe_load(info_file)
info_file.seek(0)
info_yaml['cluster'] = dict([(rem.name, {'roles': roles}) for rem, roles in ctx.cluster.remotes.items()])
yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
def check_ceph_data(ctx, config):
"""
Check for old /var/lib/ceph subdirectories and detect staleness.
"""
log.info('Checking for non-empty /var/lib/ceph...')
processes = ctx.cluster.run(
args='test -z $(ls -A /var/lib/ceph)',
wait=False,
)
failed = False
for proc in processes:
try:
proc.wait()
except run.CommandFailedError:
log.error('Host %s has stale /var/lib/ceph, check lock and nuke/cleanup.', proc.remote.shortname)
failed = True
if failed:
raise RuntimeError('Stale /var/lib/ceph detected, aborting.')
def check_conflict(ctx, config):
"""
Note directory use conflicts and stale directories.
"""
log.info('Checking for old test directory...')
testdir = misc.get_testdir(ctx)
processes = ctx.cluster.run(
args=['test', '!', '-e', testdir],
wait=False,
)
failed = False
for proc in processes:
try:
proc.wait()
except run.CommandFailedError:
log.error('Host %s has stale test directory %s, check lock and cleanup.', proc.remote.shortname, testdir)
failed = True
if failed:
raise RuntimeError('Stale jobs detected, aborting.')
def fetch_binaries_for_coredumps(path, remote):
"""
    Pull ELFs (debug and stripped) for each coredump found
"""
# Check for Coredumps:
coredump_path = os.path.join(path, 'coredump')
if os.path.isdir(coredump_path):
log.info('Transferring binaries for coredumps...')
for dump in os.listdir(coredump_path):
# Pull program from core file
dump_path = os.path.join(coredump_path, dump)
dump_info = subprocess.Popen(['file', dump_path],
stdout=subprocess.PIPE)
dump_out = dump_info.communicate()[0].decode()
            # Parse the file output to get the program. Example output:
# 1422917770.7450.core: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, \
# from 'radosgw --rgw-socket-path /home/ubuntu/cephtest/apache/tmp.client.0/fastcgi_soc'
dump_program = dump_out.split("from '")[1].split(' ')[0]
# Find path on remote server:
remote_path = remote.sh(['which', dump_program]).rstrip()
# Pull remote program into coredump folder:
local_path = os.path.join(coredump_path,
dump_program.lstrip(os.path.sep))
local_dir = os.path.dirname(local_path)
if not os.path.exists(local_dir):
os.makedirs(local_dir)
remote._sftp_get_file(remote_path, local_path)
# Pull Debug symbols:
debug_path = os.path.join('/usr/lib/debug', remote_path)
            # RPM distros append .debug to their non-stripped ELFs,
            # whereas deb-based distros do not.
if remote.system_type == 'rpm':
debug_path = '{debug_path}.debug'.format(debug_path=debug_path)
remote.get_file(debug_path, coredump_path)
def gzip_if_too_large(compress_min_size, src, tarinfo, local_path):
if tarinfo.size >= compress_min_size:
with gzip.open(local_path + '.gz', 'wb') as dest:
shutil.copyfileobj(src, dest)
else:
misc.copy_fileobj(src, tarinfo, local_path)
@contextlib.contextmanager
def archive(ctx, config):
"""
Handle the creation and deletion of the archive directory.
"""
log.info('Creating archive directory...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=['install', '-d', '-m0755', '--', archive_dir],
wait=False,
)
)
try:
yield
except Exception:
# we need to know this below
set_status(ctx.summary, 'fail')
raise
finally:
passed = get_status(ctx.summary) == 'pass'
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and passed):
log.info('Transferring archived files...')
logdir = os.path.join(ctx.archive, 'remote')
if (not os.path.exists(logdir)):
os.mkdir(logdir)
for rem in ctx.cluster.remotes.keys():
path = os.path.join(logdir, rem.shortname)
min_size_option = ctx.config.get('log-compress-min-size',
'128MB')
try:
compress_min_size_bytes = \
humanfriendly.parse_size(min_size_option)
except humanfriendly.InvalidSize:
msg = 'invalid "log-compress-min-size": {}'.format(min_size_option)
log.error(msg)
raise ConfigError(msg)
maybe_compress = functools.partial(gzip_if_too_large,
compress_min_size_bytes)
misc.pull_directory(rem, archive_dir, path, maybe_compress)
# Check for coredumps and pull binaries
fetch_binaries_for_coredumps(path, rem)
log.info('Removing archive directory...')
run.wait(
ctx.cluster.run(
args=['rm', '-rf', '--', archive_dir],
wait=False,
),
)
@contextlib.contextmanager
def sudo(ctx, config):
"""
Enable use of sudo
"""
log.info('Configuring sudo...')
sudoers_file = '/etc/sudoers'
backup_ext = '.orig.teuthology'
tty_expr = r's/^\([^#]*\) \(requiretty\)/\1 !\2/g'
pw_expr = r's/^\([^#]*\) !\(visiblepw\)/\1 \2/g'
run.wait(
ctx.cluster.run(
args="sudo sed -i{ext} -e '{tty}' -e '{pw}' {path}".format(
ext=backup_ext, tty=tty_expr, pw=pw_expr,
path=sudoers_file
),
wait=False,
)
)
try:
yield
finally:
log.info('Restoring {0}...'.format(sudoers_file))
ctx.cluster.run(
args="sudo mv -f {path}{ext} {path}".format(
path=sudoers_file, ext=backup_ext
)
)
@contextlib.contextmanager
def coredump(ctx, config):
"""
Stash a coredump of this system if an error occurs.
"""
log.info('Enabling coredump saving...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
'{adir}/coredump'.format(adir=archive_dir),
run.Raw('&&'),
'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir),
run.Raw('&&'),
'echo',
'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir),
run.Raw('|'),
'sudo', 'tee', '-a', '/etc/sysctl.conf',
],
wait=False,
)
)
try:
yield
finally:
run.wait(
ctx.cluster.run(
args=[
'sudo', 'sysctl', '-w', 'kernel.core_pattern=core',
run.Raw('&&'),
# don't litter the archive dir if there were no cores dumped
'rmdir',
'--ignore-fail-on-non-empty',
'--',
'{adir}/coredump'.format(adir=archive_dir),
],
wait=False,
)
)
        # set status = 'fail' if the dir is still there, i.e. coredumps were
        # seen
for rem in ctx.cluster.remotes.keys():
try:
rem.sh("test -e " + archive_dir + "/coredump")
except run.CommandFailedError:
continue
log.warning('Found coredumps on %s, flagging run as failed', rem)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
'Found coredumps on {rem}'.format(rem=rem)
@contextlib.contextmanager
def archive_upload(ctx, config):
"""
Upload the archive directory to a designated location
"""
try:
yield
finally:
upload = ctx.config.get('archive_upload')
archive_path = ctx.config.get('archive_path')
if upload and archive_path:
log.info('Uploading archives ...')
upload_key = ctx.config.get('archive_upload_key')
if upload_key:
ssh = "RSYNC_RSH='ssh -i " + upload_key + "'"
else:
ssh = ''
split_path = archive_path.split('/')
split_path.insert(-2, '.')
misc.sh(ssh + " rsync -avz --relative /" +
os.path.join(*split_path) + " " +
upload)
else:
log.info('Not uploading archives.')
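# --- Hedged illustration (not part of the original module) -----------------
# A small sketch of the size-threshold decision used in archive() above:
# 'log-compress-min-size' is parsed with humanfriendly, and any pulled file
# whose size is at or above that many bytes is gzipped by gzip_if_too_large().
# The file names and sizes below are made-up example values.
def _sketch_compress_decision(min_size_option='128MB'):
    threshold = humanfriendly.parse_size(min_size_option)  # bytes
    sizes = {'small.log': 4 * 1024, 'huge.log': 512 * 1024 * 1024}
    return {name: size >= threshold for name, size in sizes.items()}
# _sketch_compress_decision() -> {'small.log': False, 'huge.log': True}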
|
|
##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Interface Package Interfaces
$Id: interfaces.py 92521 2008-10-24 05:44:37Z baijum $
"""
__docformat__ = 'restructuredtext'
from zope.interface import Interface
from zope.interface.interface import Attribute
class IElement(Interface):
"""Objects that have basic documentation and tagged values.
"""
__name__ = Attribute('__name__', 'The object name')
__doc__ = Attribute('__doc__', 'The object doc string')
def getTaggedValue(tag):
"""Returns the value associated with `tag`.
        Raise a `KeyError` if the tag isn't set.
"""
def queryTaggedValue(tag, default=None):
"""Returns the value associated with `tag`.
        Return the default value if the tag isn't set.
"""
def getTaggedValueTags():
"""Returns a list of all tags."""
    def setTaggedValue(tag, value):
        """Associates `value` with `tag`."""
class IAttribute(IElement):
"""Attribute descriptors"""
interface = Attribute('interface',
'Stores the interface instance in which the '
'attribute is located.')
class IMethod(IAttribute):
"""Method attributes"""
def getSignatureInfo():
"""Returns the signature information.
This method returns a dictionary with the following keys:
o `positional` - All positional arguments.
o `required` - A list of all required arguments.
o `optional` - A list of all optional arguments.
o `varargs` - The name of the varargs argument.
o `kwargs` - The name of the kwargs argument.
"""
def getSignatureString():
"""Return a signature string suitable for inclusion in documentation.
This method returns the function signature string. For example, if you
have `func(a, b, c=1, d='f')`, then the signature string is `(a, b,
c=1, d='f')`.
"""
class ISpecification(Interface):
"""Object Behavioral specifications"""
def extends(other, strict=True):
"""Test whether a specification extends another
The specification extends other if it has other as a base
        interface or if one of its bases extends other.
If strict is false, then the specification extends itself.
"""
def isOrExtends(other):
"""Test whether the specification is or extends another
"""
def weakref(callback=None):
"""Return a weakref to the specification
This method is, regrettably, needed to allow weakrefs to be
computed to security-proxied specifications. While the
zope.interface package does not require zope.security or
zope.proxy, it has to be able to coexist with it.
"""
__bases__ = Attribute("""Base specifications
    A tuple of specifications from which this specification is
directly derived.
""")
__sro__ = Attribute("""Specification-resolution order
    A tuple of the specification and all of its ancestor
specifications from most specific to least specific.
(This is similar to the method-resolution order for new-style classes.)
""")
__iro__ = Attribute("""Interface-resolution order
    A tuple of the specification's ancestor interfaces from
most specific to least specific. The specification itself is
included if it is an interface.
(This is similar to the method-resolution order for new-style classes.)
""")
def get(name, default=None):
"""Look up the description for a name
If the named attribute is not defined, the default is
returned.
"""
class IInterface(ISpecification, IElement):
"""Interface objects
Interface objects describe the behavior of an object by containing
useful information about the object. This information includes:
o Prose documentation about the object. In Python terms, this
is called the "doc string" of the interface. In this element,
you describe how the object works in prose language and any
other useful information about the object.
o Descriptions of attributes. Attribute descriptions include
the name of the attribute and prose documentation describing
      the attribute's usage.
o Descriptions of methods. Method descriptions can include:
- Prose "doc string" documentation about the method and its
usage.
      - A description of the method's arguments; how many arguments
        are expected, optional arguments and their default values,
        the position of arguments in the signature, whether the
method accepts arbitrary arguments and whether the method
accepts arbitrary keyword arguments.
o Optional tagged data. Interface objects (and their attributes and
methods) can have optional, application specific tagged data
      associated with them. Example uses for this are examples,
security assertions, pre/post conditions, and other possible
information you may want to associate with an Interface or its
attributes.
Not all of this information is mandatory. For example, you may
only want the methods of your interface to have prose
documentation and not describe the arguments of the method in
exact detail. Interface objects are flexible and let you give or
take any of these components.
Interfaces are created with the Python class statement using
either Interface.Interface or another interface, as in::
from zope.interface import Interface
class IMyInterface(Interface):
'''Interface documentation'''
def meth(arg1, arg2):
'''Documentation for meth'''
# Note that there is no self argument
class IMySubInterface(IMyInterface):
'''Interface documentation'''
def meth2():
'''Documentation for meth2'''
You use interfaces in two ways:
    o You assert that your object implements the interfaces.
There are several ways that you can assert that an object
implements an interface:
1. Call zope.interface.implements in your class definition.
    2. Call zope.interface.directlyProvides on your object.
3. Call 'zope.interface.classImplements' to assert that instances
of a class implement an interface.
For example::
from zope.interface import classImplements
classImplements(some_class, some_interface)
This approach is useful when it is not an option to modify
the class source. Note that this doesn't affect what the
class itself implements, but only what its instances
implement.
o You query interface meta-data. See the IInterface methods and
attributes for details.
"""
def providedBy(object):
"""Test whether the interface is implemented by the object
        Return true if the object asserts that it implements the
interface, including asserting that it implements an extended
interface.
"""
def implementedBy(class_):
"""Test whether the interface is implemented by instances of the class
        Return true if the class asserts that its instances implement the
interface, including asserting that they implement an extended
interface.
"""
def names(all=False):
"""Get the interface attribute names
Return a sequence of the names of the attributes, including
methods, included in the interface definition.
Normally, only directly defined attributes are included. If
a true positional or keyword argument is given, then
attributes defined by base classes will be included.
"""
def namesAndDescriptions(all=False):
"""Get the interface attribute names and descriptions
Return a sequence of the names and descriptions of the
attributes, including methods, as name-value pairs, included
in the interface definition.
Normally, only directly defined attributes are included. If
a true positional or keyword argument is given, then
attributes defined by base classes will be included.
"""
def __getitem__(name):
"""Get the description for a name
If the named attribute is not defined, a KeyError is raised.
"""
def direct(name):
"""Get the description for the name if it was defined by the interface
If the interface doesn't define the name, returns None.
"""
def validateInvariants(obj, errors=None):
"""Validate invariants
Validate object to defined invariants. If errors is None,
raises first Invalid error; if errors is a list, appends all errors
to list, then raises Invalid with the errors as the first element
of the "args" tuple."""
def __contains__(name):
"""Test whether the name is defined by the interface"""
def __iter__():
"""Return an iterator over the names defined by the interface
The names iterated include all of the names defined by the
interface directly and indirectly by base interfaces.
"""
__module__ = Attribute("""The name of the module defining the interface""")
class IDeclaration(ISpecification):
"""Interface declaration
Declarations are used to express the interfaces implemented by
classes or provided by objects.
"""
def __contains__(interface):
"""Test whether an interface is in the specification
Return true if the given interface is one of the interfaces in
the specification and false otherwise.
"""
def __iter__():
"""Return an iterator for the interfaces in the specification
"""
def flattened():
"""Return an iterator of all included and extended interfaces
An iterator is returned for all interfaces either included in
or extended by interfaces included in the specifications
without duplicates. The interfaces are in "interface
resolution order". The interface resolution order is such that
base interfaces are listed after interfaces that extend them
and, otherwise, interfaces are included in the order that they
were defined in the specification.
"""
def __sub__(interfaces):
"""Create an interface specification with some interfaces excluded
The argument can be an interface or an interface
specification. The interface or interfaces given in a
specification are subtracted from the interface specification.
Removing an interface that is not in the specification does
not raise an error. Doing so has no effect.
Removing an interface also removes sub-interfaces of the interface.
"""
def __add__(interfaces):
"""Create an interface specification with some interfaces added
The argument can be an interface or an interface
specification. The interface or interfaces given in a
specification are added to the interface specification.
Adding an interface that is already in the specification does
not raise an error. Doing so has no effect.
"""
def __nonzero__():
"""Return a true value of the interface specification is non-empty
"""
class IInterfaceDeclaration(Interface):
"""Declare and check the interfaces of objects
The functions defined in this interface are used to declare the
interfaces that objects provide and to query the interfaces that have
been declared.
Interfaces can be declared for objects in two ways:
- Interfaces are declared for instances of the object's class
- Interfaces are declared for the object directly.
The interfaces declared for an object are, therefore, the union of
interfaces declared for the object directly and the interfaces
declared for instances of the object's class.
Note that we say that a class implements the interfaces provided
by its instances. An instance can also provide interfaces
directly. The interfaces provided by an object are the union of
the interfaces provided directly and the interfaces implemented by
the class.
"""
def providedBy(ob):
"""Return the interfaces provided by an object
This is the union of the interfaces directly provided by an
object and interfaces implemented by its class.
The value returned is an IDeclaration.
"""
def implementedBy(class_):
"""Return the interfaces implemented for a class' instances
The value returned is an IDeclaration.
"""
def classImplements(class_, *interfaces):
"""Declare additional interfaces implemented for instances of a class
The arguments after the class are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Consider the following example::
class C(A, B):
...
classImplements(C, I1, I2)
Instances of ``C`` provide ``I1``, ``I2``, and whatever interfaces
instances of ``A`` and ``B`` provide.
"""
def implementer(*interfaces):
"""Create a decorator for declaring interfaces implemented by a facory
A callable is returned that makes an implements declaration on
objects passed to it.
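As a hedged example of the decorator form (``IFoo`` and ``Foo`` are
placeholder names)::

    from zope.interface import implementer

    @implementer(IFoo)
    class Foo(object):
        pass

    IFoo.implementedBy(Foo)   # True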
"""
def classImplementsOnly(class_, *interfaces):
"""Declare the only interfaces implemented by instances of a class
The arguments after the class are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) replace any previous declarations.
Consider the following example::
class C(A, B):
...
classImplements(C, IA, IB, IC)
classImplementsOnly(C, I1, I2)
Instances of ``C`` provide only ``I1`` and ``I2``, regardless of
what interfaces instances of ``A`` and ``B`` implement.
"""
def directlyProvidedBy(object):
"""Return the interfaces directly provided by the given object
The value returned is an IDeclaration.
"""
def directlyProvides(object, *interfaces):
"""Declare interfaces declared directly for an object
The arguments after the object are one or more interfaces or
interface specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) replace interfaces previously
declared for the object.
Consider the following example::
class C(A, B):
...
ob = C()
directlyProvides(ob, I1, I2)
The object ``ob`` provides ``I1``, ``I2``, and whatever interfaces
have been declared for instances of ``C``.
To remove directly provided interfaces, use ``directlyProvidedBy`` and
subtract the unwanted interfaces. For example::
directlyProvides(ob, directlyProvidedBy(ob)-I2)
removes I2 from the interfaces directly provided by
``ob``. The object ``ob`` no longer directly provides ``I2``,
although it might still provide ``I2`` if its class
implements ``I2``.
To add directly provided interfaces, use ``directlyProvidedBy`` and
include additional interfaces. For example::
directlyProvides(ob, directlyProvidedBy(ob), I2)
adds I2 to the interfaces directly provided by ob.
"""
def alsoProvides(object, *interfaces):
"""Declare additional interfaces directly for an object::
alsoProvides(ob, I1)
is equivalent to::
directlyProvides(ob, directlyProvidedBy(ob), I1)
"""
def noLongerProvides(object, interface):
"""Remove an interface from the list of an object's directly
provided interfaces::
noLongerProvides(ob, I1)
is equivalent to::
directlyProvides(ob, directlyProvidedBy(ob)-I1)
with the exception that if ``I1`` is an interface that is
provided by ``ob`` through the class's implementation,
ValueError is raised.
"""
def implements(*interfaces):
"""Declare interfaces implemented by instances of a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The interfaces given (including the interfaces in the
specifications) are added to any interfaces previously
declared.
Previous declarations include declarations for base classes
unless implementsOnly was used.
This function is provided for convenience. It provides a more
convenient way to call classImplements. For example::
implements(I1)
is equivalent to calling::
classImplements(C, I1)
after the class has been created.
Consider the following example::
class C(A, B):
implements(I1, I2)
Instances of ``C`` implement ``I1``, ``I2``, and whatever interfaces
instances of ``A`` and ``B`` implement.
"""
def implementsOnly(*interfaces):
"""Declare the only interfaces implemented by instances of a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
Previous declarations including declarations for base classes
are overridden.
This function is provided for convenience. It provides a more
convenient way to call classImplementsOnly. For example::
implementsOnly(I1)
is equivalent to calling::
classImplementsOnly(I1)
after the class has been created.
Consider the following example::
class C(A, B):
implementsOnly(I1, I2)
Instances of ``C`` implement ``I1``, ``I2``, regardless of what
instances of ``A`` and ``B`` implement.
"""
def classProvides(*interfaces):
"""Declare interfaces provided directly by a class
This function is called in a class definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The given interfaces (including the interfaces in the
specifications) are used to create the class's direct-object
interface specification. An error will be raised if the class
already has a direct interface specification. In other words, it is
an error to call this function more than once in a class
definition.
Note that the given interfaces have nothing to do with the
interfaces implemented by instances of the class.
This function is provided for convenience. It provides a more
convenient way to call directlyProvides for a class. For example::
classProvides(I1)
is equivalent to calling::
directlyProvides(theclass, I1)
after the class has been created.
"""
def moduleProvides(*interfaces):
"""Declare interfaces provided by a module
This function is used in a module definition.
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
The given interfaces (including the interfaces in the
specifications) are used to create the module's direct-object
interface specification. An error will be raised if the module
already has an interface specification. In other words, it is
an error to call this function more than once in a module
definition.
This function is provided for convenience. It provides a more
convenient way to call directlyProvides for a module. For example::
moduleProvides(I1)
is equivalent to::
directlyProvides(sys.modules[__name__], I1)
"""
def Declaration(*interfaces):
"""Create an interface specification
The arguments are one or more interfaces or interface
specifications (IDeclaration objects).
A new interface specification (IDeclaration) with
the given interfaces is returned.
"""
class IAdapterRegistry(Interface):
"""Provide an interface-based registry for adapters
This registry registers objects that are in some sense "from" a
sequence of specifications to an interface and a name.
No specific semantics are assumed for the registered objects,
however, the most common application will be to register factories
that adapt objects providing required specifications to a provided
interface.
"""
def register(required, provided, name, value):
"""Register a value
A value is registered for a *sequence* of required specifications, a
provided interface, and a name.
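A rough sketch using the concrete registry from
``zope.interface.adapter`` (``IRequired``, ``IProvided`` and
``a_factory`` are placeholders)::

    from zope.interface.adapter import AdapterRegistry

    registry = AdapterRegistry()
    registry.register([IRequired], IProvided, '', a_factory)
    registry.lookup([IRequired], IProvided, '')   # -> a_factory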
"""
def registered(required, provided, name=u''):
"""Return the component registered for the given interfaces and name
Unlike the lookup method, this method won't retrieve
components registered for more specific required interfaces or
less specific provided interfaces.
If no component was registered exactly for the given
interfaces and name, then None is returned.
"""
def lookup(required, provided, name='', default=None):
"""Lookup a value
A value is looked up based on a *sequence* of required
specifications, a provided interface, and a name.
"""
def queryMultiAdapter(objects, provided, name=u'', default=None):
"""Adapt a sequence of objects to a named, provided, interface
"""
def lookup1(required, provided, name=u'', default=None):
"""Lookup a value using a single required interface
A value is looked up based on a single required
specification, a provided interface, and a name.
"""
def queryAdapter(object, provided, name=u'', default=None):
"""Adapt an object using a registered adapter factory.
"""
def adapter_hook(provided, object, name=u'', default=None):
"""Adapt an object using a registered adapter factory.
"""
def lookupAll(required, provided):
"""Find all adapters from the required to the provided interfaces
An iterable object is returned that provides name-value two-tuples.
"""
def names(required, provided):
"""Return the names for which there are registered objects
"""
def subscribe(required, provided, subscriber, name=u''):
"""Register a subscriber
A subscriber is registered for a *sequence* of required
specifications, a provided interface, and a name.
Multiple subscribers may be registered for the same (or
equivalent) interfaces.
"""
def subscriptions(required, provided, name=u''):
"""Get a sequence of subscribers
Subscribers for a *sequence* of required interfaces, and a provided
interface are returned.
"""
def subscribers(objects, provided, name=u''):
"""Get a sequence of subscription adapters
"""
|
|
"""Miscellaneous goodies for psycopg2
This module is a generic place used to hold little helper functions
and classes until a better place in the distribution is found.
"""
# psycopg/extras.py - miscellaneous extra goodies for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <[email protected]>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import os as _os
import sys as _sys
import time as _time
import re as _re
try:
import logging as _logging
except ImportError:
_logging = None
import psycopg2
from psycopg2 import extensions as _ext
from psycopg2.extensions import cursor as _cursor
from psycopg2.extensions import connection as _connection
from psycopg2.extensions import adapt as _A
from psycopg2.extensions import b
class DictCursorBase(_cursor):
"""Base class for all dict-like cursors."""
def __init__(self, *args, **kwargs):
if 'row_factory' in kwargs:
row_factory = kwargs['row_factory']
del kwargs['row_factory']
else:
raise NotImplementedError(
"DictCursorBase can't be instantiated without a row factory.")
super(DictCursorBase, self).__init__(*args, **kwargs)
self._query_executed = 0
self._prefetch = 0
self.row_factory = row_factory
def fetchone(self):
if self._prefetch:
res = super(DictCursorBase, self).fetchone()
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).fetchone()
return res
def fetchmany(self, size=None):
if self._prefetch:
res = super(DictCursorBase, self).fetchmany(size)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).fetchmany(size)
return res
def fetchall(self):
if self._prefetch:
res = super(DictCursorBase, self).fetchall()
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).fetchall()
return res
def __iter__(self):
if self._prefetch:
res = super(DictCursorBase, self).__iter__()
first = next(res)
if self._query_executed:
self._build_index()
if not self._prefetch:
res = super(DictCursorBase, self).__iter__()
first = next(res)
yield first
while 1:
yield next(res)
class DictConnection(_connection):
"""A connection that uses `DictCursor` automatically."""
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory', DictCursor)
return super(DictConnection, self).cursor(*args, **kwargs)
class DictCursor(DictCursorBase):
"""A cursor that keeps a list of column name -> index mappings."""
def __init__(self, *args, **kwargs):
kwargs['row_factory'] = DictRow
super(DictCursor, self).__init__(*args, **kwargs)
self._prefetch = 1
def execute(self, query, vars=None):
self.index = {}
self._query_executed = 1
return super(DictCursor, self).execute(query, vars)
def callproc(self, procname, vars=None):
self.index = {}
self._query_executed = 1
return super(DictCursor, self).callproc(procname, vars)
def _build_index(self):
if self._query_executed == 1 and self.description:
for i in range(len(self.description)):
self.index[self.description[i][0]] = i
self._query_executed = 0
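# A minimal usage sketch for the dict-like cursors; the connection string and
# the query below are assumptions, not part of this module:
#
#   conn = psycopg2.connect("dbname=test")   # hypothetical DSN
#   cur = conn.cursor(cursor_factory=DictCursor)
#   cur.execute("SELECT 1 AS answer")
#   row = cur.fetchone()
#   row['answer']   # -> 1
#   row[0]          # -> 1 (DictRow also supports positional access)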
class DictRow(list):
"""A row object that allow by-column-name access to data."""
__slots__ = ('_index',)
def __init__(self, cursor):
self._index = cursor.index
self[:] = [None] * len(cursor.description)
def __getitem__(self, x):
if not isinstance(x, (int, slice)):
x = self._index[x]
return list.__getitem__(self, x)
def __setitem__(self, x, v):
if not isinstance(x, (int, slice)):
x = self._index[x]
list.__setitem__(self, x, v)
def items(self):
return list(self.iteritems())
def keys(self):
return list(self._index.keys())
def values(self):
return tuple(self[:])
def has_key(self, x):
return x in self._index
def get(self, x, default=None):
try:
return self[x]
except (KeyError, IndexError):
return default
def iteritems(self):
for n, v in self._index.items():
yield n, list.__getitem__(self, v)
def iterkeys(self):
return iter(self._index.keys())
def itervalues(self):
return list.__iter__(self)
def copy(self):
return dict(iter(self.items()))
def __contains__(self, x):
return x in self._index
def __getstate__(self):
return self[:], self._index.copy()
def __setstate__(self, data):
self[:] = data[0]
self._index = data[1]
# drop the crusty Py2 methods
if _sys.version_info[0] > 2:
items = iteritems; del iteritems
keys = iterkeys; del iterkeys
values = itervalues; del itervalues
del has_key
class RealDictConnection(_connection):
"""A connection that uses `RealDictCursor` automatically."""
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory', RealDictCursor)
return super(RealDictConnection, self).cursor(*args, **kwargs)
class RealDictCursor(DictCursorBase):
"""A cursor that uses a real dict as the base type for rows.
Note that this cursor is extremely specialized and does not allow
the normal access (using integer indices) to fetched data. If you need
to access database rows both as a dictionary and a list, then use
the generic `DictCursor` instead of `!RealDictCursor`.
"""
def __init__(self, *args, **kwargs):
kwargs['row_factory'] = RealDictRow
super(RealDictCursor, self).__init__(*args, **kwargs)
self._prefetch = 0
def execute(self, query, vars=None):
self.column_mapping = []
self._query_executed = 1
return super(RealDictCursor, self).execute(query, vars)
def callproc(self, procname, vars=None):
self.column_mapping = []
self._query_executed = 1
return super(RealDictCursor, self).callproc(procname, vars)
def _build_index(self):
if self._query_executed == 1 and self.description:
for i in range(len(self.description)):
self.column_mapping.append(self.description[i][0])
self._query_executed = 0
class RealDictRow(dict):
"""A `!dict` subclass representing a data record."""
__slots__ = ('_column_mapping',)
def __init__(self, cursor):
dict.__init__(self)
# Required for named cursors
if cursor.description and not cursor.column_mapping:
cursor._build_index()
self._column_mapping = cursor.column_mapping
def __setitem__(self, name, value):
if type(name) == int:
name = self._column_mapping[name]
return dict.__setitem__(self, name, value)
def __getstate__(self):
return (self.copy(), self._column_mapping[:])
def __setstate__(self, data):
self.update(data[0])
self._column_mapping = data[1]
class NamedTupleConnection(_connection):
"""A connection that uses `NamedTupleCursor` automatically."""
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory', NamedTupleCursor)
return super(NamedTupleConnection, self).cursor(*args, **kwargs)
class NamedTupleCursor(_cursor):
"""A cursor that generates results as `~collections.namedtuple`.
`!fetch*()` methods will return named tuples instead of regular tuples, so
their elements can be accessed both as regular numeric items and as
attributes.
>>> nt_cur = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
>>> rec = nt_cur.fetchone()
>>> rec
Record(id=1, num=100, data="abc'def")
>>> rec[1]
100
>>> rec.data
"abc'def"
"""
Record = None
def execute(self, query, vars=None):
self.Record = None
return super(NamedTupleCursor, self).execute(query, vars)
def executemany(self, query, vars):
self.Record = None
return super(NamedTupleCursor, self).executemany(query, vars)
def callproc(self, procname, vars=None):
self.Record = None
return super(NamedTupleCursor, self).callproc(procname, vars)
def fetchone(self):
t = super(NamedTupleCursor, self).fetchone()
if t is not None:
nt = self.Record
if nt is None:
nt = self.Record = self._make_nt()
return nt._make(t)
def fetchmany(self, size=None):
ts = super(NamedTupleCursor, self).fetchmany(size)
nt = self.Record
if nt is None:
nt = self.Record = self._make_nt()
return list(map(nt._make, ts))
def fetchall(self):
ts = super(NamedTupleCursor, self).fetchall()
nt = self.Record
if nt is None:
nt = self.Record = self._make_nt()
return list(map(nt._make, ts))
def __iter__(self):
it = super(NamedTupleCursor, self).__iter__()
t = next(it)
nt = self.Record
if nt is None:
nt = self.Record = self._make_nt()
yield nt._make(t)
while 1:
yield nt._make(next(it))
try:
from collections import namedtuple
except ImportError as _exc:
def _make_nt(self):
raise self._exc
else:
def _make_nt(self, namedtuple=namedtuple):
return namedtuple("Record", [d[0] for d in self.description or ()])
class LoggingConnection(_connection):
"""A connection that logs all queries to a file or logger__ object.
.. __: http://docs.python.org/library/logging.html
"""
def initialize(self, logobj):
"""Initialize the connection to log to `!logobj`.
The `!logobj` parameter can be an open file object or a Logger
instance from the standard logging module.
"""
self._logobj = logobj
if _logging and isinstance(logobj, _logging.Logger):
self.log = self._logtologger
else:
self.log = self._logtofile
def filter(self, msg, curs):
"""Filter the query before logging it.
This is the method to override to filter unwanted queries out of the
log or to add some extra data to the output. The default implementation
just does nothing.
"""
return msg
def _logtofile(self, msg, curs):
msg = self.filter(msg, curs)
if msg: self._logobj.write(msg + _os.linesep)
def _logtologger(self, msg, curs):
msg = self.filter(msg, curs)
if msg: self._logobj.debug(msg)
def _check(self):
if not hasattr(self, '_logobj'):
raise self.ProgrammingError(
"LoggingConnection object has not been initialize()d")
def cursor(self, *args, **kwargs):
self._check()
kwargs.setdefault('cursor_factory', LoggingCursor)
return super(LoggingConnection, self).cursor(*args, **kwargs)
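# Rough usage sketch for LoggingConnection (the DSN and logger name are
# assumptions):
#
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#   conn = psycopg2.connect("dbname=test",
#                           connection_factory=LoggingConnection)
#   conn.initialize(logging.getLogger("sql"))
#   conn.cursor().execute("SELECT 1")   # the query text is logged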
class LoggingCursor(_cursor):
"""A cursor that logs queries using its connection logging facilities."""
def execute(self, query, vars=None):
try:
return super(LoggingCursor, self).execute(query, vars)
finally:
self.connection.log(self.query, self)
def callproc(self, procname, vars=None):
try:
return super(LoggingCursor, self).callproc(procname, vars)
finally:
self.connection.log(self.query, self)
class MinTimeLoggingConnection(LoggingConnection):
"""A connection that logs queries based on execution time.
This is just an example of how to sub-class `LoggingConnection` to
provide some extra filtering for the logged queries. Both the
`initialize()` and `filter()` methods are overridden to make sure
that only queries executing for more than ``mintime`` ms are logged.
Note that this connection uses the specialized cursor
`MinTimeLoggingCursor`.
"""
def initialize(self, logobj, mintime=0):
LoggingConnection.initialize(self, logobj)
self._mintime = mintime
def filter(self, msg, curs):
t = (_time.time() - curs.timestamp) * 1000
if t > self._mintime:
return msg + _os.linesep + " (execution time: %d ms)" % t
def cursor(self, *args, **kwargs):
kwargs.setdefault('cursor_factory', MinTimeLoggingCursor)
return LoggingConnection.cursor(self, *args, **kwargs)
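# Sketch: log only statements that run longer than 500 ms (connection
# parameters are assumptions):
#
#   conn = psycopg2.connect("dbname=test",
#                           connection_factory=MinTimeLoggingConnection)
#   conn.initialize(logging.getLogger("slow-sql"), mintime=500)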
class MinTimeLoggingCursor(LoggingCursor):
"""The cursor sub-class companion to `MinTimeLoggingConnection`."""
def execute(self, query, vars=None):
self.timestamp = _time.time()
return LoggingCursor.execute(self, query, vars)
def callproc(self, procname, vars=None):
self.timestamp = _time.time()
return LoggingCursor.callproc(self, procname, vars)
# a dbtype and adapter for Python UUID type
class UUID_adapter(object):
"""Adapt Python's uuid.UUID__ type to PostgreSQL's uuid__.
.. __: http://docs.python.org/library/uuid.html
.. __: http://www.postgresql.org/docs/current/static/datatype-uuid.html
"""
def __init__(self, uuid):
self._uuid = uuid
def __conform__(self, proto):
if proto is _ext.ISQLQuote:
return self
def getquoted(self):
return b("'%s'::uuid" % self._uuid)
def __str__(self):
return "'%s'::uuid" % self._uuid
def register_uuid(oids=None, conn_or_curs=None):
"""Create the UUID type and an uuid.UUID adapter.
:param oids: oid for the PostgreSQL :sql:`uuid` type, or 2-items sequence
with oids of the type and the array. If not specified, use PostgreSQL
standard oids.
:param conn_or_curs: where to register the typecaster. If not specified,
register it globally.
"""
import uuid
if not oids:
oid1 = 2950
oid2 = 2951
elif isinstance(oids, (list, tuple)):
oid1, oid2 = oids
else:
oid1 = oids
oid2 = 2951
_ext.UUID = _ext.new_type((oid1, ), "UUID",
lambda data, cursor: data and uuid.UUID(data) or None)
_ext.UUIDARRAY = _ext.new_array_type((oid2,), "UUID[]", _ext.UUID)
_ext.register_type(_ext.UUID, conn_or_curs)
_ext.register_type(_ext.UUIDARRAY, conn_or_curs)
_ext.register_adapter(uuid.UUID, UUID_adapter)
return _ext.UUID
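# Hedged usage example; ``cur`` is assumed to be a cursor on an open connection:
#
#   import uuid
#   register_uuid()                             # register globally
#   cur.execute("SELECT %s", (uuid.uuid4(),))
#   cur.fetchone()[0]                           # -> a uuid.UUID instance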
# a type, dbtype and adapter for PostgreSQL inet type
class Inet(object):
"""Wrap a string to allow for correct SQL-quoting of inet values.
Note that this adapter does NOT check the passed value to make
sure it really is an inet-compatible address but DOES call adapt()
on it to make sure it is impossible to execute an SQL-injection
by passing an evil value to the initializer.
"""
def __init__(self, addr):
self.addr = addr
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.addr)
def prepare(self, conn):
self._conn = conn
def getquoted(self):
obj = _A(self.addr)
if hasattr(obj, 'prepare'):
obj.prepare(self._conn)
return obj.getquoted() + b"::inet"
def __conform__(self, proto):
if proto is _ext.ISQLQuote:
return self
def __str__(self):
return str(self.addr)
def register_inet(oid=None, conn_or_curs=None):
"""Create the INET type and an Inet adapter.
:param oid: oid for the PostgreSQL :sql:`inet` type, or 2-items sequence
with oids of the type and the array. If not specified, use PostgreSQL
standard oids.
:param conn_or_curs: where to register the typecaster. If not specified,
register it globally.
"""
if not oid:
oid1 = 869
oid2 = 1041
elif isinstance(oid, (list, tuple)):
oid1, oid2 = oid
else:
oid1 = oid
oid2 = 1041
_ext.INET = _ext.new_type((oid1, ), "INET",
lambda data, cursor: data and Inet(data) or None)
_ext.INETARRAY = _ext.new_array_type((oid2, ), "INETARRAY", _ext.INET)
_ext.register_type(_ext.INET, conn_or_curs)
_ext.register_type(_ext.INETARRAY, conn_or_curs)
return _ext.INET
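# Hedged usage example; ``cur`` is assumed to be a cursor on an open connection:
#
#   register_inet()
#   cur.execute("SELECT %s", (Inet("192.168.0.1/24"),))
#   cur.fetchone()[0].addr                      # -> '192.168.0.1/24'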
def register_tstz_w_secs(oids=None, conn_or_curs=None):
"""The function used to register an alternate type caster for
:sql:`TIMESTAMP WITH TIME ZONE` to deal with historical time zones with
seconds in the UTC offset.
These are now correctly handled by the default type caster, so currently
the function doesn't do anything.
"""
import warnings
warnings.warn("deprecated", DeprecationWarning)
def wait_select(conn):
"""Wait until a connection or cursor has data available.
The function is an example of a wait callback to be registered with
`~psycopg2.extensions.set_wait_callback()`. This function uses
:py:func:`~select.select()` to wait for data available.
"""
import select
from psycopg2.extensions import POLL_OK, POLL_READ, POLL_WRITE
while 1:
try:
state = conn.poll()
if state == POLL_OK:
break
elif state == POLL_READ:
select.select([conn.fileno()], [], [])
elif state == POLL_WRITE:
select.select([], [conn.fileno()], [])
else:
raise conn.OperationalError("bad state from poll: %s" % state)
except KeyboardInterrupt:
conn.cancel()
# the loop will be broken by a server error
continue
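# To use it, register it as the global wait callback (sketch; the DSN below is
# an assumption):
#
#   psycopg2.extensions.set_wait_callback(wait_select)
#   conn = psycopg2.connect("dbname=test")      # now polled through wait_select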
def _solve_conn_curs(conn_or_curs):
"""Return the connection and a DBAPI cursor from a connection or cursor."""
if conn_or_curs is None:
raise psycopg2.ProgrammingError("no connection or cursor provided")
if hasattr(conn_or_curs, 'execute'):
conn = conn_or_curs.connection
curs = conn.cursor(cursor_factory=_cursor)
else:
conn = conn_or_curs
curs = conn.cursor(cursor_factory=_cursor)
return conn, curs
class HstoreAdapter(object):
"""Adapt a Python dict to the hstore syntax."""
def __init__(self, wrapped):
self.wrapped = wrapped
def prepare(self, conn):
self.conn = conn
# use an old-style getquoted implementation if required
if conn.server_version < 90000:
self.getquoted = self._getquoted_8
def _getquoted_8(self):
"""Use the operators available in PG pre-9.0."""
if not self.wrapped:
return b"''::hstore"
adapt = _ext.adapt
rv = []
for k, v in self.wrapped.items():
k = adapt(k)
k.prepare(self.conn)
k = k.getquoted()
if v is not None:
v = adapt(v)
v.prepare(self.conn)
v = v.getquoted()
else:
v = b'NULL'
# XXX this b'ing is painfully inefficient!
rv.append(b"(" + k + b" => " + v + b")")
return b"(" + b'||'.join(rv) + b")"
def _getquoted_9(self):
"""Use the hstore(text[], text[]) function."""
if not self.wrapped:
return b"''::hstore"
k = _ext.adapt(list(self.wrapped.keys()))
k.prepare(self.conn)
v = _ext.adapt(list(self.wrapped.values()))
v.prepare(self.conn)
return b"hstore(" + k.getquoted() + b", " + v.getquoted() + b")"
getquoted = _getquoted_9
_re_hstore = _re.compile(r"""
# hstore key:
# a string of normal or escaped chars
"((?: [^"\\] | \\. )*)"
\s*=>\s* # hstore value
(?:
NULL # the value can be null - not captured
# or a quoted string like the key
| "((?: [^"\\] | \\. )*)"
)
(?:\s*,\s*|$) # pairs separated by comma or end of string.
""", _re.VERBOSE)
@classmethod
def parse(self, s, cur, _bsdec=_re.compile(r"\\(.)")):
"""Parse an hstore representation in a Python string.
The hstore is represented as something like::
"a"=>"1", "b"=>"2"
with backslash-escaped strings.
"""
if s is None:
return None
rv = {}
start = 0
for m in self._re_hstore.finditer(s):
if m is None or m.start() != start:
raise psycopg2.InterfaceError(
"error parsing hstore pair at char %d" % start)
k = _bsdec.sub(r'\1', m.group(1))
v = m.group(2)
if v is not None:
v = _bsdec.sub(r'\1', v)
rv[k] = v
start = m.end()
if start < len(s):
raise psycopg2.InterfaceError(
"error parsing hstore: unparsed data after char %d" % start)
return rv
@classmethod
def parse_unicode(self, s, cur):
"""Parse an hstore returning unicode keys and values."""
if s is None:
return None
s = s.decode(_ext.encodings[cur.connection.encoding])
return self.parse(s, cur)
@classmethod
def get_oids(self, conn_or_curs):
"""Return the lists of OID of the hstore and hstore[] types.
"""
conn, curs = _solve_conn_curs(conn_or_curs)
# Store the transaction status of the connection to revert it after use
conn_status = conn.status
# column typarray not available before PG 8.3
typarray = conn.server_version >= 80300 and "typarray" or "NULL"
rv0, rv1 = [], []
# get the oid for the hstore
curs.execute("""\
SELECT t.oid, %s
FROM pg_type t JOIN pg_namespace ns
ON typnamespace = ns.oid
WHERE typname = 'hstore';
""" % typarray)
for oids in curs:
rv0.append(oids[0])
rv1.append(oids[1])
# revert the status of the connection as before the command
if (conn_status != _ext.STATUS_IN_TRANSACTION
and not conn.autocommit):
conn.rollback()
return tuple(rv0), tuple(rv1)
def register_hstore(conn_or_curs, globally=False, unicode=False,
oid=None, array_oid=None):
"""Register adapter and typecaster for `!dict`\-\ |hstore| conversions.
:param conn_or_curs: a connection or cursor: the typecaster will be
registered only on this object unless *globally* is set to `!True`
:param globally: register the adapter globally, not only on *conn_or_curs*
:param unicode: if `!True`, keys and values returned from the database
will be `!unicode` instead of `!str`. The option is not available on
Python 3
:param oid: the OID of the |hstore| type if known. If not, it will be
queried on *conn_or_curs*.
:param array_oid: the OID of the |hstore| array type if known. If not, it
will be queried on *conn_or_curs*.
The connection or cursor passed to the function will be used to query the
database and look for the OID of the |hstore| type (which may be different
across databases). If querying is not desirable (e.g. with
:ref:`asynchronous connections <async-support>`) you may specify it in the
*oid* parameter, which can be found using a query such as :sql:`SELECT
'hstore'::regtype::oid`. Analogously you can obtain a value for *array_oid*
using a query such as :sql:`SELECT 'hstore[]'::regtype::oid`.
Note that, when passing a dictionary from Python to the database, both
strings and unicode keys and values are supported. Dictionaries returned
from the database have keys/values according to the *unicode* parameter.
The |hstore| contrib module must be already installed in the database
(executing the ``hstore.sql`` script in your ``contrib`` directory).
Raise `~psycopg2.ProgrammingError` if the type is not found.
"""
if oid is None:
oid = HstoreAdapter.get_oids(conn_or_curs)
if oid is None or not oid[0]:
raise psycopg2.ProgrammingError(
"hstore type not found in the database. "
"please install it from your 'contrib/hstore.sql' file")
else:
array_oid = oid[1]
oid = oid[0]
if isinstance(oid, int):
oid = (oid,)
if array_oid is not None:
if isinstance(array_oid, int):
array_oid = (array_oid,)
else:
array_oid = tuple([x for x in array_oid if x])
# create and register the typecaster
if _sys.version_info[0] < 3 and unicode:
cast = HstoreAdapter.parse_unicode
else:
cast = HstoreAdapter.parse
HSTORE = _ext.new_type(oid, "HSTORE", cast)
_ext.register_type(HSTORE, not globally and conn_or_curs or None)
_ext.register_adapter(dict, HstoreAdapter)
if array_oid:
HSTOREARRAY = _ext.new_array_type(array_oid, "HSTOREARRAY", HSTORE)
_ext.register_type(HSTOREARRAY, not globally and conn_or_curs or None)
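# Hedged usage sketch for register_hstore; the connection is an assumption and
# the hstore extension must already be installed in the database:
#
#   conn = psycopg2.connect("dbname=test")
#   register_hstore(conn)
#   cur = conn.cursor()
#   cur.execute("SELECT 'a=>1, b=>2'::hstore")
#   cur.fetchone()[0]                           # -> {'a': '1', 'b': '2'}
#   cur.execute("SELECT %s", ({'c': '3'},))     # dicts now adapt to hstore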
class CompositeCaster(object):
"""Helps conversion of a PostgreSQL composite type into a Python object.
The class is usually created by the `register_composite()` function.
You may want to create and register manually instances of the class if
querying the database at registration time is not desirable (such as when
using an :ref:`asynchronous connections <async-support>`).
"""
def __init__(self, name, oid, attrs, array_oid=None, schema=None):
self.name = name
self.schema = schema
self.oid = oid
self.array_oid = array_oid
self.attnames = [ a[0] for a in attrs ]
self.atttypes = [ a[1] for a in attrs ]
self._create_type(name, self.attnames)
self.typecaster = _ext.new_type((oid,), name, self.parse)
if array_oid:
self.array_typecaster = _ext.new_array_type(
(array_oid,), "%sARRAY" % name, self.typecaster)
else:
self.array_typecaster = None
def parse(self, s, curs):
if s is None:
return None
tokens = self.tokenize(s)
if len(tokens) != len(self.atttypes):
raise psycopg2.DataError(
"expecting %d components for the type %s, %d found instead" %
(len(self.atttypes), self.name, len(tokens)))
values = [ curs.cast(oid, token)
for oid, token in zip(self.atttypes, tokens) ]
return self.make(values)
def make(self, values):
"""Return a new Python object representing the data being casted.
*values* is the list of attributes, already casted into their Python
representation.
You can subclass this method to :ref:`customize the composite cast
<custom-composite>`.
"""
return self._ctor(values)
_re_tokenize = _re.compile(r"""
\(? ([,)]) # an empty token, representing NULL
| \(? " ((?: [^"] | "")*) " [,)] # or a quoted string
| \(? ([^",)]+) [,)] # or an unquoted string
""", _re.VERBOSE)
_re_undouble = _re.compile(r'(["\\])\1')
@classmethod
def tokenize(self, s):
rv = []
for m in self._re_tokenize.finditer(s):
if m is None:
raise psycopg2.InterfaceError("can't parse type: %r" % s)
if m.group(1) is not None:
rv.append(None)
elif m.group(2) is not None:
rv.append(self._re_undouble.sub(r"\1", m.group(2)))
else:
rv.append(m.group(3))
return rv
def _create_type(self, name, attnames):
try:
from collections import namedtuple
except ImportError:
self.type = tuple
self._ctor = self.type
else:
self.type = namedtuple(name, attnames)
self._ctor = self.type._make
@classmethod
def _from_db(self, name, conn_or_curs):
"""Return a `CompositeCaster` instance for the type *name*.
Raise `ProgrammingError` if the type is not found.
"""
conn, curs = _solve_conn_curs(conn_or_curs)
# Store the transaction status of the connection to revert it after use
conn_status = conn.status
# Use the correct schema
if '.' in name:
schema, tname = name.split('.', 1)
else:
tname = name
schema = 'public'
# column typarray not available before PG 8.3
typarray = conn.server_version >= 80300 and "typarray" or "NULL"
# get the type oid and attributes
curs.execute("""\
SELECT t.oid, %s, attname, atttypid
FROM pg_type t
JOIN pg_namespace ns ON typnamespace = ns.oid
JOIN pg_attribute a ON attrelid = typrelid
WHERE typname = %%s AND nspname = %%s
AND attnum > 0 AND NOT attisdropped
ORDER BY attnum;
""" % typarray, (tname, schema))
recs = curs.fetchall()
# revert the status of the connection as before the command
if (conn_status != _ext.STATUS_IN_TRANSACTION
and not conn.autocommit):
conn.rollback()
if not recs:
raise psycopg2.ProgrammingError(
"PostgreSQL type '%s' not found" % name)
type_oid = recs[0][0]
array_oid = recs[0][1]
type_attrs = [ (r[2], r[3]) for r in recs ]
return self(tname, type_oid, type_attrs,
array_oid=array_oid, schema=schema)
def register_composite(name, conn_or_curs, globally=False, factory=None):
"""Register a typecaster to convert a composite type into a tuple.
:param name: the name of a PostgreSQL composite type, e.g. created using
the |CREATE TYPE|_ command
:param conn_or_curs: a connection or cursor used to find the type oid and
components; the typecaster is registered in a scope limited to this
object, unless *globally* is set to `!True`
:param globally: if `!False` (default) register the typecaster only on
*conn_or_curs*, otherwise register it globally
:param factory: if specified it should be a `CompositeCaster` subclass: use
it to :ref:`customize how to cast composite types <custom-composite>`
:return: the registered `CompositeCaster` or *factory* instance
responsible for the conversion
"""
if factory is None:
factory = CompositeCaster
caster = factory._from_db(name, conn_or_curs)
_ext.register_type(caster.typecaster, not globally and conn_or_curs or None)
if caster.array_typecaster is not None:
_ext.register_type(caster.array_typecaster, not globally and conn_or_curs or None)
return caster
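# Hedged usage sketch for register_composite, loosely following the psycopg2
# documentation; the type name, columns and connection are assumptions:
#
#   cur = conn.cursor()
#   cur.execute("CREATE TYPE card AS (value int, suit text)")
#   register_composite('card', cur)
#   cur.execute("SELECT (8, 'hearts')::card")
#   cur.fetchone()[0]                           # -> card(value=8, suit='hearts')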
# expose the json adaptation stuff into the module
from psycopg2._json import json, Json, register_json
from psycopg2._json import register_default_json, register_default_jsonb
# Expose range-related objects
from psycopg2._range import Range, NumericRange
from psycopg2._range import DateRange, DateTimeRange, DateTimeTZRange
from psycopg2._range import register_range, RangeAdapter, RangeCaster
|
|
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, isna, notna
import pandas._testing as tm
def test_expanding_corr(series):
A = series.dropna()
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().corr(B)
rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_count(series):
result = series.expanding(min_periods=0).count()
tm.assert_almost_equal(
result, series.rolling(window=len(series), min_periods=0).count()
)
def test_expanding_quantile(series):
result = series.expanding().quantile(0.5)
rolling_result = series.rolling(window=len(series), min_periods=1).quantile(0.5)
tm.assert_almost_equal(result, rolling_result)
def test_expanding_cov(series):
A = series
B = (A + np.random.randn(len(A)))[:-5]
result = A.expanding().cov(B)
rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)
tm.assert_almost_equal(rolling_result, result)
def test_expanding_cov_pairwise(frame):
result = frame.expanding().cov()
rolling_result = frame.rolling(window=len(frame), min_periods=1).cov()
tm.assert_frame_equal(result, rolling_result)
def test_expanding_corr_pairwise(frame):
result = frame.expanding().corr()
rolling_result = frame.rolling(window=len(frame), min_periods=1).corr()
tm.assert_frame_equal(result, rolling_result)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_func(func, static_comp, frame_or_series):
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = getattr(data.expanding(min_periods=1, axis=0), func)()
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[10], static_comp(data[:11]))
else:
tm.assert_series_equal(
result.iloc[10], static_comp(data[:11]), check_names=False
)
@pytest.mark.parametrize(
"func,static_comp",
[("sum", np.sum), ("mean", np.mean), ("max", np.max), ("min", np.min)],
ids=["sum", "mean", "max", "min"],
)
def test_expanding_min_periods(func, static_comp):
ser = Series(np.random.randn(50))
result = getattr(ser.expanding(min_periods=30, axis=0), func)()
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
# min_periods is working correctly
result = getattr(ser.expanding(min_periods=15, axis=0), func)()
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = getattr(ser2.expanding(min_periods=5, axis=0), func)()
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = getattr(ser.expanding(min_periods=0, axis=0), func)()
result1 = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result0, result1)
result = getattr(ser.expanding(min_periods=1, axis=0), func)()
tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))
def test_expanding_apply(engine_and_raw, frame_or_series):
engine, raw = engine_and_raw
data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))
result = data.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isinstance(result, frame_or_series)
if frame_or_series is Series:
tm.assert_almost_equal(result[9], np.mean(data[:11]))
else:
tm.assert_series_equal(result.iloc[9], np.mean(data[:11]), check_names=False)
def test_expanding_min_periods_apply(engine_and_raw):
engine, raw = engine_and_raw
ser = Series(np.random.randn(50))
result = ser.expanding(min_periods=30).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert result[:29].isna().all()
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
# min_periods is working correctly
result = ser.expanding(min_periods=15).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result.iloc[13])
assert notna(result.iloc[14])
ser2 = Series(np.random.randn(20))
result = ser2.expanding(min_periods=5).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
assert isna(result[3])
assert notna(result[4])
# min_periods=0
result0 = ser.expanding(min_periods=0).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
result1 = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result0, result1)
result = ser.expanding(min_periods=1).apply(
lambda x: x.mean(), raw=raw, engine=engine
)
tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum])
def test_expanding_apply_consistency_sum_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_expanding_apply_consistency_sum_no_nans(consistency_data, min_periods, f):
x, is_constant, no_nans = consistency_data
if no_nans:
if f is np.nansum and min_periods == 0:
pass
else:
expanding_f_result = x.expanding(min_periods=min_periods).sum()
expanding_apply_f_result = x.expanding(min_periods=min_periods).apply(
func=f, raw=True
)
tm.assert_equal(expanding_f_result, expanding_apply_f_result)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
mean_x = x.expanding(min_periods=min_periods).mean()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
if ddof == 0:
# check that biased var(x) == mean(x^2) - mean(x)^2
mean_x2 = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_moments_consistency_var_constant(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding(min_periods=min_periods).count()
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
# check that variance of constant series is identically 0
assert not (var_x > 0).any().any()
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = 0.0
if ddof == 1:
expected[count_x < 2] = np.nan
tm.assert_equal(var_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_std(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
assert not (var_x < 0).any().any()
assert not (std_x < 0).any().any()
# check that var(x) == std(x)^2
tm.assert_equal(var_x, std_x * std_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_cov(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
assert not (var_x < 0).any().any()
cov_x_x = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
assert not (cov_x_x < 0).any().any()
# check that var(x) == cov(x, x)
tm.assert_equal(var_x, cov_x_x)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("ddof", [0, 1])
def test_expanding_consistency_series_cov_corr(consistency_data, min_periods, ddof):
x, is_constant, no_nans = consistency_data
if isinstance(x, Series):
var_x_plus_y = (x + x).expanding(min_periods=min_periods).var(ddof=ddof)
var_x = x.expanding(min_periods=min_periods).var(ddof=ddof)
var_y = x.expanding(min_periods=min_periods).var(ddof=ddof)
cov_x_y = x.expanding(min_periods=min_periods).cov(x, ddof=ddof)
# check that cov(x, y) == (var(x+y) - var(x) -
# var(y)) / 2
tm.assert_equal(cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y))
# check that corr(x, y) == cov(x, y) / (std(x) *
# std(y))
corr_x_y = x.expanding(min_periods=min_periods).corr(x)
std_x = x.expanding(min_periods=min_periods).std(ddof=ddof)
std_y = x.expanding(min_periods=min_periods).std(ddof=ddof)
tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
if ddof == 0:
# check that biased cov(x, y) == mean(x*y) -
# mean(x)*mean(y)
mean_x = x.expanding(min_periods=min_periods).mean()
mean_y = x.expanding(min_periods=min_periods).mean()
mean_x_times_y = (x * x).expanding(min_periods=min_periods).mean()
tm.assert_equal(cov_x_y, mean_x_times_y - (mean_x * mean_y))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_mean(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
result = x.expanding(min_periods=min_periods).mean()
expected = (
x.expanding(min_periods=min_periods).sum()
/ x.expanding(min_periods=min_periods).count()
)
tm.assert_equal(result, expected.astype("float64"))
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_constant(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
if is_constant:
count_x = x.expanding().count()
mean_x = x.expanding(min_periods=min_periods).mean()
# check that correlation of a series with itself is either 1 or NaN
corr_x_x = x.expanding(min_periods=min_periods).corr(x)
exp = x.max() if isinstance(x, Series) else x.max().max()
# check mean of constant series
expected = x * np.nan
expected[count_x >= max(min_periods, 1)] = exp
tm.assert_equal(mean_x, expected)
# check correlation of constant series with itself is NaN
expected[:] = np.nan
tm.assert_equal(corr_x_x, expected)
@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4])
def test_expanding_consistency_var_debiasing_factors(consistency_data, min_periods):
x, is_constant, no_nans = consistency_data
# check variance debiasing factors
var_unbiased_x = x.expanding(min_periods=min_periods).var()
var_biased_x = x.expanding(min_periods=min_periods).var(ddof=0)
var_debiasing_factors_x = x.expanding().count() / (
x.expanding().count() - 1.0
).replace(0.0, np.nan)
tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)
@pytest.mark.parametrize(
"f",
[
lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)),
lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)),
],
)
def test_moment_functions_zero_length_pairwise(f):
df1 = DataFrame()
df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))
df2["a"] = df2["a"].astype("float64")
df1_expected = DataFrame(
index=MultiIndex.from_product([df1.index, df1.columns]), columns=Index([])
)
df2_expected = DataFrame(
index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),
columns=Index(["a"], name="foo"),
dtype="float64",
)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
@pytest.mark.parametrize(
"f",
[
lambda x: x.expanding().count(),
lambda x: x.expanding(min_periods=5).cov(x, pairwise=False),
lambda x: x.expanding(min_periods=5).corr(x, pairwise=False),
lambda x: x.expanding(min_periods=5).max(),
lambda x: x.expanding(min_periods=5).min(),
lambda x: x.expanding(min_periods=5).sum(),
lambda x: x.expanding(min_periods=5).mean(),
lambda x: x.expanding(min_periods=5).std(),
lambda x: x.expanding(min_periods=5).var(),
lambda x: x.expanding(min_periods=5).skew(),
lambda x: x.expanding(min_periods=5).kurt(),
lambda x: x.expanding(min_periods=5).quantile(0.5),
lambda x: x.expanding(min_periods=5).median(),
lambda x: x.expanding(min_periods=5).apply(sum, raw=False),
lambda x: x.expanding(min_periods=5).apply(sum, raw=True),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = DataFrame(columns=["a"])
df2["a"] = df2["a"].astype("float64")
df2_expected = df2
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df1_result = f(df1)
tm.assert_frame_equal(df1_result, df1_expected)
df2_result = f(df2)
tm.assert_frame_equal(df2_result, df2_expected)
def test_expanding_apply_empty_series(engine_and_raw):
engine, raw = engine_and_raw
ser = Series([], dtype=np.float64)
tm.assert_series_equal(
ser, ser.expanding().apply(lambda x: x.mean(), raw=raw, engine=engine)
)
def test_expanding_apply_min_periods_0(engine_and_raw):
# GH 8080
engine, raw = engine_and_raw
s = Series([None, None, None])
result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw, engine=engine)
expected = Series([1.0, 2.0, 3.0])
tm.assert_series_equal(result, expected)
def test_expanding_cov_diff_index():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().cov(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().cov(s2)
expected = Series([None, None, None, 4.5])
tm.assert_series_equal(result, expected)
def test_expanding_corr_diff_index():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.expanding().corr(s2)
expected = Series([None, None, 1.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.expanding().corr(s2a)
tm.assert_series_equal(result, expected)
s1 = Series([7, 8, 10], index=[0, 1, 3])
s2 = Series([7, 9, 10], index=[0, 2, 3])
result = s1.expanding().corr(s2)
expected = Series([None, None, None, 1.0])
tm.assert_series_equal(result, expected)
def test_expanding_cov_pairwise_diff_length():
# GH 7512
df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=Index(["A", "B"], name="foo"))
df1a = DataFrame(
[[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo")
)
df2 = DataFrame(
[[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo")
)
df2a = DataFrame(
[[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo")
)
# TODO: xref gh-15826
# .loc is not preserving the names
result1 = df1.expanding().cov(df2, pairwise=True).loc[2]
result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]
result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]
expected = DataFrame(
[[-3.0, -6.0], [-5.0, -10.0]],
columns=Index(["A", "B"], name="foo"),
index=Index(["X", "Y"], name="foo"),
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_expanding_corr_pairwise_diff_length():
# GH 7512
df1 = DataFrame(
[[1, 2], [3, 2], [3, 4]], columns=["A", "B"], index=Index(range(3), name="bar")
)
df1a = DataFrame(
[[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"]
)
df2 = DataFrame(
[[5, 6], [None, None], [2, 1]],
columns=["X", "Y"],
index=Index(range(3), name="bar"),
)
df2a = DataFrame(
[[5, 6], [2, 1]], index=Index([0, 2], name="bar"), columns=["X", "Y"]
)
result1 = df1.expanding().corr(df2, pairwise=True).loc[2]
result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]
result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]
result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]
expected = DataFrame(
[[-1.0, -1.0], [-1.0, -1.0]], columns=["A", "B"], index=Index(["X", "Y"])
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected)
tm.assert_frame_equal(result4, expected)
def test_expanding_apply_args_kwargs(engine_and_raw):
def mean_w_arg(x, const):
return np.mean(x) + const
engine, raw = engine_and_raw
df = DataFrame(np.random.rand(20, 3))
expected = df.expanding().apply(np.mean, engine=engine, raw=raw) + 20.0
result = df.expanding().apply(mean_w_arg, engine=engine, raw=raw, args=(20,))
tm.assert_frame_equal(result, expected)
result = df.expanding().apply(mean_w_arg, raw=raw, kwargs={"const": 20})
tm.assert_frame_equal(result, expected)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Sequence, Tuple
from unittest import TestCase, mock
from unittest.mock import PropertyMock
import pytest
from google.api_core.retry import Retry
from google.cloud.exceptions import NotFound
from google.cloud.memcache_v1beta2.types import cloud_memcache
from google.cloud.redis_v1.types import Instance
from airflow import version
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.cloud_memorystore import (
CloudMemorystoreHook,
CloudMemorystoreMemcachedHook,
)
from tests.providers.google.cloud.utils.base_gcp_mock import (
GCP_PROJECT_ID_HOOK_UNIT_TEST,
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
TEST_GCP_CONN_ID = "test-gcp-conn-id" # type: str
TEST_DELEGATE_TO = "test-delegate-to" # type: str
TEST_LOCATION = "test-location" # type: str
TEST_INSTANCE_ID = "test-instance-id" # type: str
TEST_PROJECT_ID = "test-project-id" # type: str
TEST_RETRY = Retry() # type: Retry
TEST_TIMEOUT = 10 # type: float
TEST_METADATA = [("KEY", "VALUE")] # type: Sequence[Tuple[str, str]]
TEST_PAGE_SIZE = 100 # type: int
TEST_UPDATE_MASK = {"paths": ["memory_size_gb"]} # type: Dict
TEST_UPDATE_MASK_MEMCACHED = {"displayName": "updated name"} # type: Dict
TEST_PARENT = "projects/test-project-id/locations/test-location" # type: str
TEST_NAME = "projects/test-project-id/locations/test-location/instances/test-instance-id" # type: str
TEST_PARENT_DEFAULT_PROJECT_ID = "projects/{}/locations/test-location".format(
GCP_PROJECT_ID_HOOK_UNIT_TEST
) # type: str
TEST_NAME_DEFAULT_PROJECT_ID = "projects/{}/locations/test-location/instances/test-instance-id".format(
GCP_PROJECT_ID_HOOK_UNIT_TEST
) # type: str
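# Illustrative helper (an assumption about the naming scheme, not used by the
# tests): the fully-qualified names above follow the usual Memorystore layout
# "projects/<project>/locations/<location>/instances/<instance>".
def _instance_path(project_id, location, instance_id):
    return "projects/{}/locations/{}/instances/{}".format(project_id, location, instance_id)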
class TestCloudMemorystoreWithDefaultProjectIdHook(TestCase):
def setUp(
self,
):
with mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = CloudMemorystoreHook(gcp_conn_id="test")
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_create_instance_when_exists(self, mock_get_conn, mock_project_id):
mock_get_conn.return_value.get_instance.return_value = Instance(name=TEST_NAME)
result = self.hook.create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=Instance(name=TEST_NAME),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.assert_called_once_with(
request=dict(name=TEST_NAME_DEFAULT_PROJECT_ID),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert Instance(name=TEST_NAME) == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_create_instance_when_not_exists(self, mock_get_conn, mock_project_id):
mock_get_conn.return_value.get_instance.side_effect = [
NotFound("Instance not found"),
Instance(name=TEST_NAME),
]
mock_get_conn.return_value.create_instance.return_value.result.return_value = Instance(name=TEST_NAME)
result = self.hook.create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=Instance(name=TEST_NAME),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.has_calls(
[
mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA),
mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA),
]
)
mock_get_conn.return_value.create_instance.assert_called_once_with(
request=dict(
parent=TEST_PARENT_DEFAULT_PROJECT_ID,
instance=Instance(
name=TEST_NAME,
labels={"airflow-version": "v" + version.version.replace(".", "-").replace("+", "-")},
),
instance_id=TEST_INSTANCE_ID,
),
metadata=TEST_METADATA,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
)
assert Instance(name=TEST_NAME) == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_delete_instance(self, mock_get_conn, mock_project_id):
self.hook.delete_instance(
location=TEST_LOCATION,
instance=TEST_INSTANCE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.delete_instance.assert_called_once_with(
request=dict(name=TEST_NAME_DEFAULT_PROJECT_ID),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_get_instance(self, mock_get_conn, mock_project_id):
self.hook.get_instance(
location=TEST_LOCATION,
instance=TEST_INSTANCE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.assert_called_once_with(
request=dict(name=TEST_NAME_DEFAULT_PROJECT_ID),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_list_instances(self, mock_get_conn, mock_project_id):
self.hook.list_instances(
location=TEST_LOCATION,
page_size=TEST_PAGE_SIZE,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.list_instances.assert_called_once_with(
request=dict(parent=TEST_PARENT_DEFAULT_PROJECT_ID, page_size=TEST_PAGE_SIZE),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_update_instance(self, mock_get_conn, mock_project_id):
self.hook.update_instance(
update_mask=TEST_UPDATE_MASK,
instance=Instance(name=TEST_NAME),
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.update_instance.assert_called_once_with(
request=dict(update_mask=TEST_UPDATE_MASK, instance=Instance(name=TEST_NAME_DEFAULT_PROJECT_ID)),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
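# Note: the tests above never pass project_id explicitly; with
# mock_base_gcp_hook_default_project_id the hook appears to fall back to
# GCP_PROJECT_ID_HOOK_UNIT_TEST, which is why the expected resource names use
# TEST_NAME_DEFAULT_PROJECT_ID / TEST_PARENT_DEFAULT_PROJECT_ID.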
class TestCloudMemorystoreWithoutDefaultProjectIdHook(TestCase):
def setUp(
self,
):
with mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.__init__",
new=mock_base_gcp_hook_no_default_project_id,
):
self.hook = CloudMemorystoreHook(gcp_conn_id="test")
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_create_instance_when_exists(self, mock_get_conn):
mock_get_conn.return_value.get_instance.return_value = Instance(name=TEST_NAME)
result = self.hook.create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=Instance(name=TEST_NAME),
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.assert_called_once_with(
request=dict(name="projects/test-project-id/locations/test-location/instances/test-instance-id"),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert Instance(name=TEST_NAME) == result
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_create_instance_when_not_exists(self, mock_get_conn):
mock_get_conn.return_value.get_instance.side_effect = [
NotFound("Instance not found"),
Instance(name=TEST_NAME),
]
mock_get_conn.return_value.create_instance.return_value.result.return_value = Instance(name=TEST_NAME)
result = self.hook.create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=Instance(name=TEST_NAME),
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.has_calls(
[
mock.call(
name="projects/test-project-id/locations/test-location/instances/test-instance-id",
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
),
mock.call(
name="projects/test-project-id/locations/test-location/instances/test-instance-id",
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
),
]
)
mock_get_conn.return_value.create_instance.assert_called_once_with(
request=dict(
parent=TEST_PARENT,
instance=Instance(
name=TEST_NAME,
labels={"airflow-version": "v" + version.version.replace(".", "-").replace("+", "-")},
),
instance_id=TEST_INSTANCE_ID,
),
metadata=TEST_METADATA,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
)
assert Instance(name=TEST_NAME) == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_create_instance_without_project_id(self, mock_get_conn, mock_project_id):
with pytest.raises(AirflowException):
self.hook.create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=Instance(name=TEST_NAME),
project_id=None,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_delete_instance(self, mock_get_conn):
self.hook.delete_instance(
location=TEST_LOCATION,
instance=TEST_INSTANCE_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.delete_instance.assert_called_once_with(
request=dict(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_delete_instance_without_project_id(self, mock_get_conn, mock_project_id):
with pytest.raises(AirflowException):
self.hook.delete_instance(
location=TEST_LOCATION,
instance=Instance(name=TEST_NAME),
project_id=None,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_get_instance(self, mock_get_conn):
self.hook.get_instance(
location=TEST_LOCATION,
instance=TEST_INSTANCE_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.assert_called_once_with(
request=dict(name=TEST_NAME), retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_get_instance_without_project_id(self, mock_get_conn, mock_project_id):
with pytest.raises(AirflowException):
self.hook.get_instance(
location=TEST_LOCATION,
instance=Instance(name=TEST_NAME),
project_id=None,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_list_instances(self, mock_get_conn):
self.hook.list_instances(
location=TEST_LOCATION,
page_size=TEST_PAGE_SIZE,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.list_instances.assert_called_once_with(
request=dict(parent=TEST_PARENT, page_size=TEST_PAGE_SIZE),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_list_instances_without_project_id(self, mock_get_conn, mock_project_id):
with pytest.raises(AirflowException):
self.hook.list_instances(
location=TEST_LOCATION,
page_size=TEST_PAGE_SIZE,
project_id=None,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_update_instance(self, mock_get_conn):
self.hook.update_instance(
update_mask=TEST_UPDATE_MASK,
instance=Instance(name=TEST_NAME),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
project_id=TEST_PROJECT_ID,
)
mock_get_conn.return_value.update_instance.assert_called_once_with(
request=dict(update_mask={'paths': ['memory_size_gb']}, instance=Instance(name=TEST_NAME)),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=None,
)
@mock.patch("airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreHook.get_conn")
def test_update_instance_without_project_id(self, mock_get_conn, mock_project_id):
with pytest.raises(AirflowException):
self.hook.update_instance(
update_mask=TEST_UPDATE_MASK,
instance=Instance(name=TEST_NAME),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
class TestCloudMemorystoreMemcachedWithDefaultProjectIdHook(TestCase):
def setUp(
self,
):
with mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.hook = CloudMemorystoreMemcachedHook(gcp_conn_id="test")
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn"
)
def test_create_instance_when_exists(self, mock_get_conn, mock_project_id):
mock_get_conn.return_value.get_instance.return_value = cloud_memcache.Instance(name=TEST_NAME)
result = self.hook.create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=cloud_memcache.Instance(name=TEST_NAME),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.assert_called_once_with(
name=TEST_NAME_DEFAULT_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA
)
assert cloud_memcache.Instance(name=TEST_NAME) == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn"
)
def test_create_instance_when_not_exists(self, mock_get_conn, mock_project_id):
mock_get_conn.return_value.get_instance.side_effect = [
NotFound("Instance not found"),
cloud_memcache.Instance(name=TEST_NAME),
]
mock_get_conn.return_value.create_instance.return_value.result.return_value = cloud_memcache.Instance(
name=TEST_NAME
)
result = self.hook.create_instance(
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
instance=cloud_memcache.Instance(name=TEST_NAME),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.has_calls(
[
mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA),
mock.call(name=TEST_NAME, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA),
]
)
mock_get_conn.return_value.create_instance.assert_called_once_with(
resource=cloud_memcache.Instance(
name=TEST_NAME,
labels={"airflow-version": "v" + version.version.replace(".", "-").replace("+", "-")},
),
instance_id=TEST_INSTANCE_ID,
metadata=TEST_METADATA,
parent=TEST_PARENT_DEFAULT_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
)
assert cloud_memcache.Instance(name=TEST_NAME) == result
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn"
)
def test_delete_instance(self, mock_get_conn, mock_project_id):
self.hook.delete_instance(
location=TEST_LOCATION,
instance=TEST_INSTANCE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.delete_instance.assert_called_once_with(
name=TEST_NAME_DEFAULT_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn"
)
def test_get_instance(self, mock_get_conn, mock_project_id):
self.hook.get_instance(
location=TEST_LOCATION,
instance=TEST_INSTANCE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_instance.assert_called_once_with(
name=TEST_NAME_DEFAULT_PROJECT_ID, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn"
)
def test_list_instances(self, mock_get_conn, mock_project_id):
self.hook.list_instances(
location=TEST_LOCATION,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.list_instances.assert_called_once_with(
parent=TEST_PARENT_DEFAULT_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id',
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch(
"airflow.providers.google.cloud.hooks.cloud_memorystore.CloudMemorystoreMemcachedHook.get_conn"
)
def test_update_instance(self, mock_get_conn, mock_project_id):
self.hook.update_instance(
update_mask=TEST_UPDATE_MASK_MEMCACHED,
instance=cloud_memcache.Instance(name=TEST_NAME),
location=TEST_LOCATION,
instance_id=TEST_INSTANCE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.update_instance.assert_called_once_with(
update_mask=TEST_UPDATE_MASK_MEMCACHED,
resource=cloud_memcache.Instance(name=TEST_NAME_DEFAULT_PROJECT_ID),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
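# Note the API-shape difference exercised above: the Redis-based
# CloudMemorystoreHook client is asserted with a single ``request=dict(...)``
# argument, while the Memcached client appears to take flat keyword arguments
# (name=..., parent=..., resource=...).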
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from copy import deepcopy
from datetime import datetime, timedelta
import json
import logging
import mock
import os
import shutil
import tempfile
from c7n import policy, manager
from c7n.config import Config
from c7n.provider import clouds
from c7n.exceptions import ResourceLimitExceeded, PolicyValidationError
from c7n.resources import aws, load_available
from c7n.resources.aws import AWS, fake_session
from c7n.resources.ec2 import EC2
from c7n.policy import ConfigPollRuleMode, PullMode
from c7n.schema import generate, JsonSchemaValidator
from c7n.utils import dumps
from c7n.query import ConfigSource, TypeInfo
from c7n.version import version
from .common import BaseTest, event_data, Bag, load_data
class DummyResource(manager.ResourceManager):
def resources(self):
return [{"abc": 123}, {"def": 456}]
@property
def actions(self):
class _a:
def name(self):
return self.f.__name__
def __init__(self, f):
self.f = f
def process(self, resources):
return self.f(resources)
def p1(resources):
return [{"abc": 456}, {"def": 321}]
def p2(resources):
return resources
return [_a(p1), _a(p2)]
class PolicyMetaLint(BaseTest):
def setUp(self):
# we need to load all resources for the linting meta tests.
load_available()
def test_policy_missing_provider_session(self):
self.assertRaises(
RuntimeError,
policy.get_session_factory,
'nosuchthing', Bag())
def test_policy_detail_spec_permissions(self):
policy = self.load_policy(
{"name": "kinesis-delete", "resource": "kinesis", "actions": ["delete"]}
)
perms = policy.get_permissions()
self.assertEqual(
perms,
{
"kinesis:DescribeStream",
"kinesis:ListStreams",
"kinesis:DeleteStream",
},
)
def test_resource_type_repr_with_arn_type(self):
policy = self.load_policy({'name': 'ecr', 'resource': 'aws.ops-item'})
# check the repr absent a config type and cfn type but with an arn type
assert policy.resource_manager.resource_type.config_type is None
assert policy.resource_manager.resource_type.cfn_type is None
assert str(policy.resource_manager.resource_type) == '<TypeInfo AWS::Ssm::Opsitem>'
def test_resource_type_repr(self):
policy = self.load_policy({'name': 'ecr', 'resource': 'aws.ecr'})
# check the repr absent a config type but with a cfn type
assert policy.resource_manager.resource_type.config_type is None
assert str(policy.resource_manager.resource_type) == '<TypeInfo AWS::ECR::Repository>'
def test_schema_plugin_name_mismatch(self):
# todo iterate over all clouds not just aws resources
for k, v in manager.resources.items():
for fname, f in v.filter_registry.items():
if fname in ("or", "and", "not"):
continue
self.assertIn(fname, f.schema["properties"]["type"]["enum"])
for aname, a in v.action_registry.items():
self.assertIn(aname, a.schema["properties"]["type"]["enum"])
def test_schema(self):
try:
schema = generate()
JsonSchemaValidator.check_schema(schema)
except Exception:
self.fail("Invalid schema")
def test_schema_serialization(self):
try:
dumps(generate())
except Exception:
self.fail("Failed to serialize schema")
def test_resource_augment_universal_mask(self):
# universal tag had a potential bad pattern of masking
# resource augmentation; scan resources to ensure it does not
missing = []
for k, v in manager.resources.items():
if not getattr(v.resource_type, "universal_taggable", None):
continue
if (
v.augment.__name__ == "universal_augment" and
getattr(v.resource_type, "detail_spec", None)
):
missing.append(k)
if missing:
self.fail(
"%s resource has universal augment masking resource augment" % (
', '.join(missing))
)
def test_resource_universal_taggable_arn_type(self):
missing = []
for k, v in manager.resources.items():
if not getattr(v, 'augment', None):
continue
if (
v.augment.__name__ == "universal_augment" and
v.resource_type.arn_type is None
):
missing.append(k)
if missing:
self.fail("%s universal taggable resource missing arn_type" % (
', '.join(missing)))
def test_resource_shadow_source_augment(self):
shadowed = []
bad = []
cfg = Config.empty()
for k, v in manager.resources.items():
if not getattr(v.resource_type, "config_type", None):
continue
p = Bag({"name": "permcheck", "resource": k, 'provider_name': 'aws'})
ctx = self.get_context(config=cfg, policy=p)
mgr = v(ctx, p)
source = mgr.get_source("config")
if not isinstance(source, ConfigSource):
bad.append(k)
if v.__dict__.get("augment"):
shadowed.append(k)
if shadowed:
self.fail(
"%s have resource managers shadowing source augments"
% (", ".join(shadowed))
)
if bad:
self.fail("%s have config types but no config source" % (", ".join(bad)))
def test_resource_arn_override_generator(self):
overrides = set()
for k, v in manager.resources.items():
arn_gen = bool(v.__dict__.get('get_arns') or v.__dict__.get('generate_arn'))
if arn_gen:
overrides.add(k)
overrides = overrides.difference(
{'account', 's3', 'hostedzone', 'log-group', 'rest-api', 'redshift-snapshot',
'rest-stage'})
if overrides:
raise ValueError("unknown arn overrides in %s" % (", ".join(overrides)))
def test_resource_name(self):
names = []
for k, v in manager.resources.items():
if not getattr(v.resource_type, "name", None):
names.append(k)
if names:
self.fail("%s dont have resource name for reporting" % (", ".join(names)))
def test_ec2_id_prefix(self):
missing_prefix = []
for k, v in manager.resources.items():
if v.resource_type.service != 'ec2':
continue
if v.resource_type.id_prefix is None:
missing_prefix.append(k)
if missing_prefix:
self.fail('ec2 resources missing id prefix %s' % (', '.join(missing_prefix)))
def test_cfn_resource_validity(self):
# for resources which are annotated with cfn_type ensure that it is
# a valid type.
resource_cfn_types = set()
for k, v in manager.resources.items():
rtype = v.resource_type.cfn_type
if rtype is not None:
resource_cfn_types.add(rtype)
cfn_types = set(load_data('cfn-types.json'))
missing = set()
for rtype in resource_cfn_types:
if rtype not in cfn_types:
missing.add(rtype)
if missing:
raise AssertionError("Bad cfn types:\n %s" % (
"\n".join(sorted(missing))))
def test_securityhub_resource_support(self):
session = fake_session()._session
model = session.get_service_model('securityhub')
shape = model.shape_for('ResourceDetails')
mangled_hub_types = set(shape.members.keys())
resource_hub_types = set()
whitelist = set(('AwsS3Object', 'Container'))
todo = set((
# newer wave q1 2021,
'AwsS3AccountPublicAccessBlock',
'AwsSsmPatchCompliance',
# newer wave q4 2020
'AwsApiGatewayRestApi',
'AwsApiGatewayStage',
'AwsApiGatewayV2Api',
'AwsApiGatewayV2Stage',
'AwsCertificateManagerCertificate',
'AwsCloudTrailTrail',
'AwsElbLoadBalancer',
'AwsIamGroup',
'AwsRedshiftCluster',
# newer wave q3 2020
'AwsDynamoDbTable',
'AwsEc2Eip',
'AwsIamPolicy',
'AwsIamUser',
'AwsRdsDbCluster',
'AwsRdsDbClusterSnapshot',
'AwsRdsDbSnapshot',
'AwsSecretsManagerSecret',
# older wave
'AwsRdsDbInstance',
'AwsElbv2LoadBalancer',
'AwsEc2SecurityGroup',
'AwsIamAccessKey',
'AwsEc2NetworkInterface',
'AwsWafWebAcl'))
mangled_hub_types = mangled_hub_types.difference(whitelist).difference(todo)
for k, v in manager.resources.items():
finding = v.action_registry.get('post-finding')
if finding:
resource_hub_types.add(finding.resource_type)
assert mangled_hub_types.difference(resource_hub_types) == set()
def test_config_resource_support(self):
# for several of these we express support as filter or action instead
# of a resource.
whitelist = {
'AWS::Config::ConformancePackCompliance',
'AWS::NetworkFirewall::FirewallPolicy',
'AWS::NetworkFirewall::Firewall',
'AWS::NetworkFirewall::RuleGroup',
'AWS::EC2::RegisteredHAInstance',
'AWS::EC2::EgressOnlyInternetGateway',
'AWS::EC2::VPCEndpointService',
'AWS::EC2::FlowLog',
'AWS::ECS::TaskDefinition',
'AWS::RDS::DBSecurityGroup',
'AWS::RDS::EventSubscription',
'AWS::S3::AccountPublicAccessBlock',
'AWS::Redshift::ClusterParameterGroup',
'AWS::Redshift::ClusterSecurityGroup',
'AWS::Redshift::EventSubscription',
'AWS::SSM::ManagedInstanceInventory',
'AWS::AutoScaling::ScalingPolicy',
'AWS::AutoScaling::ScheduledAction',
'AWS::WAF::RateBasedRule',
'AWS::WAF::Rule',
'AWS::WAF::RuleGroup',
'AWS::WAFRegional::RateBasedRule',
'AWS::WAFRegional::Rule',
'AWS::WAFRegional::RuleGroup',
'AWS::ElasticBeanstalk::ApplicationVersion',
'AWS::WAFv2::WebACL',
'AWS::WAFv2::RuleGroup',
'AWS::WAFv2::IPSet',
'AWS::WAFv2::RegexPatternSet',
'AWS::WAFv2::ManagedRuleSet',
'AWS::XRay::EncryptionConfig',
'AWS::SSM::AssociationCompliance',
'AWS::SSM::PatchCompliance',
'AWS::ShieldRegional::Protection',
'AWS::Config::ResourceCompliance',
'AWS::ApiGatewayV2::Stage',
'AWS::ApiGatewayV2::Api',
'AWS::ServiceCatalog::CloudFormationProvisionedProduct',
'AWS::ServiceCatalog::CloudFormationProduct',
'AWS::SSM::FileData'}
resource_map = {}
for k, v in manager.resources.items():
if not v.resource_type.config_type:
continue
resource_map[v.resource_type.config_type] = v
resource_config_types = set(resource_map)
session = fake_session()._session
model = session.get_service_model('config')
shape = model.shape_for('ResourceType')
config_types = set(shape.enum).difference(whitelist)
missing = config_types.difference(resource_config_types)
if missing:
raise AssertionError(
"Missing config types \n %s" % ('\n'.join(missing)))
# the config service sdk enum is not kept fully up to date; skip known gaps
invalid_ignore = {
'AWS::EKS::Cluster',
'AWS::ECS::Service',
'AWS::ECS::TaskDefinition',
'AWS::NetworkFirewall::Firewall'
}
bad_types = resource_config_types.difference(config_types)
bad_types = bad_types.difference(invalid_ignore)
if bad_types:
raise AssertionError(
"Invalid config types \n %s" % ('\n'.join(bad_types)))
def test_resource_meta_with_class(self):
missing = set()
for k, v in manager.resources.items():
if k in ('rest-account', 'account'):
continue
if not issubclass(v.resource_type, TypeInfo):
missing.add(k)
if missing:
raise SyntaxError("missing type info class %s" % (', '.join(missing)))
def test_resource_type_empty_metadata(self):
empty = set()
for k, v in manager.resources.items():
if k in ('rest-account', 'account'):
continue
for rk, rv in v.resource_type.__dict__.items():
if rk[0].isalnum() and rv is None:
empty.add(k)
if empty:
raise ValueError("Empty Resource Metadata %s" % (', '.join(empty)))
def test_resource_legacy_type(self):
legacy = set()
marker = object()
for k, v in manager.resources.items():
if getattr(v.resource_type, 'type', marker) is not marker:
legacy.add(k)
if legacy:
raise SyntaxError("legacy arn type info %s" % (', '.join(legacy)))
def _visit_filters_and_actions(self, visitor):
names = []
for cloud_name, cloud in clouds.items():
for resource_name, resource in cloud.resources.items():
for fname, f in resource.filter_registry.items():
if fname in ('and', 'or', 'not'):
continue
if visitor(f):
names.append("%s.%s.filters.%s" % (
cloud_name, resource_name, fname))
for aname, a in resource.action_registry.items():
if visitor(a):
names.append('%s.%s.actions.%s' % (
cloud_name, resource_name, aname))
return names
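# _visit_filters_and_actions applies a predicate to every registered filter
# and action across all cloud providers (skipping the and/or/not boolean
# blocks) and returns the dotted names of offenders; the two tests below use
# it to lint schema completeness.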
def test_filter_action_additional(self):
def visitor(e):
if e.type == 'notify':
return
return e.schema.get('additionalProperties', True) is True
names = self._visit_filters_and_actions(visitor)
if names:
self.fail(
"missing additionalProperties: False on actions/filters\n %s" % (
" \n".join(names)))
def test_filter_action_type(self):
def visitor(e):
return 'type' not in e.schema['properties']
names = self._visit_filters_and_actions(visitor)
if names:
self.fail("missing type on actions/filters\n %s" % (" \n".join(names)))
def test_resource_arn_info(self):
missing = []
whitelist_missing = {
'rest-stage', 'rest-resource', 'rest-vpclink', 'rest-client-certificate'}
explicit = []
whitelist_explicit = {
'rest-account', 'shield-protection', 'shield-attack',
'dlm-policy', 'efs', 'efs-mount-target', 'gamelift-build',
'glue-connection', 'glue-dev-endpoint', 'cloudhsm-cluster',
'snowball-cluster', 'snowball', 'ssm-activation',
'healthcheck', 'event-rule-target',
'support-case', 'transit-attachment', 'config-recorder'}
missing_method = []
for k, v in manager.resources.items():
rtype = getattr(v, 'resource_type', None)
if not v.has_arn():
missing_method.append(k)
if rtype is None:
continue
if v.__dict__.get('get_arns'):
continue
if getattr(rtype, 'arn', None) is False:
explicit.append(k)
if getattr(rtype, 'arn', None) is not None:
continue
if getattr(rtype, 'type', None) is not None:
continue
if getattr(rtype, 'arn_type', None) is not None:
continue
missing.append(k)
self.assertEqual(
set(missing).union(explicit),
set(missing_method))
missing = set(missing).difference(whitelist_missing)
if missing:
self.fail(
"%d resources %s are missing arn type info" % (
len(missing), ", ".join(missing)))
explicit = set(explicit).difference(whitelist_explicit)
if explicit:
self.fail(
"%d resources %s dont have arn type info exempted" % (
len(explicit), ", ".join(explicit)))
def test_resource_permissions(self):
self.capture_logging("c7n.cache")
missing = []
cfg = Config.empty()
for k, v in list(manager.resources.items()):
p = Bag({"name": "permcheck", "resource": k, 'provider_name': 'aws'})
ctx = self.get_context(config=cfg, policy=p)
mgr = v(ctx, p)
perms = mgr.get_permissions()
if not perms:
missing.append(k)
for n, a in list(v.action_registry.items()):
p["actions"] = [n]
perms = a({}, mgr).get_permissions()
found = bool(perms)
if not isinstance(perms, (list, tuple, set)):
found = False
if "webhook" == n:
continue
if not found:
missing.append("%s.actions.%s" % (k, n))
for n, f in list(v.filter_registry.items()):
if n in ("and", "or", "not", "missing", "reduce"):
continue
p["filters"] = [n]
perms = f({}, mgr).get_permissions()
if not isinstance(perms, (tuple, list, set)):
missing.append("%s.filters.%s" % (k, n))
# in memory filters
if n in (
"event",
"value",
"tag-count",
"marked-for-op",
"offhour",
"onhour",
"age",
"state-age",
"egress",
"ingress",
"capacity-delta",
"is-ssl",
"global-grants",
"missing-policy-statement",
"missing-statement",
"healthcheck-protocol-mismatch",
"image-age",
"has-statement",
"no-access",
"instance-age",
"ephemeral",
"instance-uptime",
):
continue
qk = "%s.filters.%s" % (k, n)
if qk in ("route-table.filters.route",):
continue
if not perms:
missing.append(qk)
if missing:
self.fail(
"Missing permissions %d on \n\t%s"
% (len(missing), "\n\t".join(sorted(missing)))
)
class PolicyMeta(BaseTest):
def test_policy_detail_spec_permissions(self):
policy = self.load_policy(
{"name": "kinesis-delete",
"resource": "kinesis",
"actions": ["delete"]}
)
perms = policy.get_permissions()
self.assertEqual(
perms,
{
"kinesis:DescribeStream",
"kinesis:ListStreams",
"kinesis:DeleteStream",
},
)
def test_policy_manager_custom_permissions(self):
policy = self.load_policy(
{
"name": "ec2-utilization",
"resource": "ec2",
"filters": [
{
"type": "metrics",
"name": "CPUUtilization",
"days": 3,
"value": 1.5,
}
],
}
)
perms = policy.get_permissions()
self.assertEqual(
perms,
{
"ec2:DescribeInstances",
"ec2:DescribeTags",
"cloudwatch:GetMetricStatistics",
},
)
class TestPolicyCollection(BaseTest):
def test_expand_partitions(self):
cfg = Config.empty(regions=["us-gov-west-1", "cn-north-1", "us-west-2"])
original = policy.PolicyCollection.from_data(
{"policies": [{"name": "foo", "resource": "ec2"}]}, cfg
)
collection = AWS().initialize_policies(original, cfg)
self.assertEqual(
sorted([p.options.region for p in collection]),
["cn-north-1", "us-gov-west-1", "us-west-2"],
)
def test_policy_expand_group_region(self):
cfg = Config.empty(regions=["us-east-1", "us-east-2", "us-west-2"])
original = policy.PolicyCollection.from_data(
{"policies": [
{"name": "bar", "resource": "lambda"},
{"name": "middle", "resource": "security-group"},
{"name": "foo", "resource": "ec2"}]},
cfg)
collection = AWS().initialize_policies(original, cfg)
self.assertEqual(
[(p.name, p.options.region) for p in collection],
[('bar', 'us-east-1'),
('middle', 'us-east-1'),
('foo', 'us-east-1'),
('bar', 'us-east-2'),
('middle', 'us-east-2'),
('foo', 'us-east-2'),
('bar', 'us-west-2'),
('middle', 'us-west-2'),
('foo', 'us-west-2')])
def test_policy_region_expand_global(self):
factory = self.replay_flight_data('test_aws_policy_global_expand')
self.patch(aws, '_profile_session', factory())
original = self.policy_loader.load_data(
{"policies": [
{"name": "foo", "resource": "s3"},
{"name": "iam", "resource": "iam-user"}]},
'memory://',
config=Config.empty(regions=["us-east-1", "us-west-2"]),
)
collection = AWS().initialize_policies(
original, Config.empty(regions=["all"], output_dir="/test/output/"))
self.assertEqual(len(collection.resource_types), 2)
s3_regions = [p.options.region for p in collection if p.resource_type == "s3"]
self.assertTrue("us-east-1" in s3_regions)
self.assertTrue("us-east-2" in s3_regions)
iam = [p for p in collection if p.resource_type == "iam-user"]
self.assertEqual(len(iam), 1)
self.assertEqual(iam[0].options.region, "us-east-1")
self.assertEqual(iam[0].options.output_dir, "/test/output/us-east-1")
# Don't append region when it's already in the path.
collection = AWS().initialize_policies(
original, Config.empty(regions=["all"], output_dir="/test/{region}/output/"))
self.assertEqual(len(collection.resource_types), 2)
iam = [p for p in collection if p.resource_type == "iam-user"]
self.assertEqual(iam[0].options.region, "us-east-1")
self.assertEqual(iam[0].options.output_dir, "/test/{region}/output")
collection = AWS().initialize_policies(
original, Config.empty(regions=["eu-west-1", "eu-west-2"], output_dir="/test/output/")
)
iam = [p for p in collection if p.resource_type == "iam-user"]
self.assertEqual(len(iam), 1)
self.assertEqual(iam[0].options.region, "eu-west-1")
self.assertEqual(iam[0].options.output_dir, "/test/output/eu-west-1")
self.assertEqual(len(collection), 3)
class TestPolicy(BaseTest):
def test_policy_variable_precedent(self):
p = self.load_policy({
'name': 'compute',
'resource': 'aws.ec2'},
config={'account_id': '00100100'})
v = p.get_variables({'account_id': 'foobar',
'charge_code': 'oink'})
self.assertEqual(v['account_id'], '00100100')
self.assertEqual(v['charge_code'], 'oink')
def test_policy_with_role_complete(self):
p = self.load_policy({
'name': 'compute',
'resource': 'aws.ec2',
'mode': {
'type': 'config-rule',
'member-role': 'arn:aws:iam::{account_id}:role/BarFoo',
'role': 'arn:aws:iam::{account_id}:role/FooBar'},
'actions': [
{'type': 'tag',
'value': 'bad monkey {account_id} {region} {now:+2d%Y-%m-%d}'},
{'type': 'notify',
'to': ['[email protected]'],
'transport': {
'type': 'sns',
'topic': 'arn:::::',
},
'subject': "S3 - Cross-Account -[custodian {{ account }} - {{ region }}]"},
]}, config={'account_id': '12312311', 'region': 'zanzibar'})
assert p.get_execution_mode().get_permissions() == ()
p.expand_variables(p.get_variables())
self.assertEqual(p.data['mode']['role'], 'arn:aws:iam::12312311:role/FooBar')
def test_policy_variable_interpolation(self):
p = self.load_policy({
'name': 'compute',
'resource': 'aws.ec2',
'mode': {
'type': 'config-rule',
'member-role': 'arn:aws:iam::{account_id}:role/BarFoo',
'role': 'FooBar'},
'actions': [
{'type': 'tag',
'value': 'bad monkey {account_id} {region} {now:+2d%Y-%m-%d}'},
{'type': 'notify',
'to': ['[email protected]'],
'transport': {
'type': 'sns',
'topic': 'arn:::::',
},
'subject': "S3 - Cross-Account -[custodian {{ account }} - {{ region }}]"},
]}, config={'account_id': '12312311', 'region': 'zanzibar'})
ivalue = 'bad monkey 12312311 zanzibar %s' % (
(datetime.utcnow() + timedelta(2)).strftime('%Y-%m-%d'))
p.expand_variables(p.get_variables())
self.assertEqual(p.data['actions'][0]['value'], ivalue)
self.assertEqual(
p.data['actions'][1]['subject'],
"S3 - Cross-Account -[custodian {{ account }} - {{ region }}]")
self.assertEqual(p.data['mode']['role'], 'arn:aws:iam::12312311:role/FooBar')
self.assertEqual(p.data['mode']['member-role'], 'arn:aws:iam::{account_id}:role/BarFoo')
self.assertEqual(p.resource_manager.actions[0].data['value'], ivalue)
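# The assertions above pin down the interpolation rules: single-brace
# {account_id}/{region}/{now:...} placeholders in action values are expanded,
# a bare mode 'role' name is expanded into a full IAM role ARN, the
# 'member-role' template is left unexpanded, and double-brace
# "{{ account }}" jinja-style templates are preserved for the notify subject.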
def test_child_resource_trail_validation(self):
self.assertRaises(
ValueError,
self.load_policy,
{
"name": "api-resources",
"resource": "rest-resource",
"mode": {
"type": "cloudtrail",
"events": [
{
"source": "apigateway.amazonaws.com",
"event": "UpdateResource",
"ids": "requestParameter.stageName",
}
],
},
},
)
def test_load_policy_validation_error(self):
invalid_policies = {
"policies": [
{
"name": "foo",
"resource": "s3",
"filters": [{"tag:custodian_tagging": "not-null"}],
"actions": [
{"type": "untag", "tags": {"custodian_cleanup": "yes"}}
],
}
]
}
self.assertRaises(Exception, self.load_policy_set, invalid_policies)
def test_policy_validation(self):
policy = self.load_policy(
{
"name": "ec2-utilization",
"resource": "ec2",
"tags": ["abc"],
"filters": [
{
"type": "metrics",
"name": "CPUUtilization",
"days": 3,
"value": 1.5,
}
],
"actions": ["stop"],
}
)
policy.validate()
self.assertEqual(policy.tags, ["abc"])
self.assertFalse(policy.is_lambda)
self.assertTrue(
repr(policy).startswith("<Policy resource:ec2 name:ec2-utilization")
)
def test_policy_name_and_resource_type_filtering(self):
collection = self.load_policy_set(
{
"policies": [
{"name": "s3-remediate", "resource": "s3"},
{"name": "s3-global-grants", "resource": "s3"},
{"name": "ec2-tag-compliance-stop", "resource": "ec2"},
{"name": "ec2-tag-compliance-kill", "resource": "ec2"},
{"name": "ec2-tag-compliance-remove", "resource": "ec2"},
]
}
)
self.assertIn("s3-remediate", collection)
self.assertNotIn("s3-argle-bargle", collection)
# Make sure __iter__ works
for p in collection:
self.assertTrue(p.name is not None)
self.assertEqual(collection.resource_types, {"s3", "ec2"})
self.assertTrue("s3-remediate" in collection)
self.assertEqual(
[p.name for p in collection.filter(["s3*"])],
["s3-remediate", "s3-global-grants"],
)
self.assertEqual(
[p.name for p in collection.filter(["ec2*"])],
[
"ec2-tag-compliance-stop",
"ec2-tag-compliance-kill",
"ec2-tag-compliance-remove",
],
)
self.assertEqual(
[p.name for p in collection.filter(["ec2*", "s3*"])],
[p.name for p in collection],
)
self.assertEqual(
[p.name for p in collection.filter(resource_types=["ec2"])],
[
"ec2-tag-compliance-stop",
"ec2-tag-compliance-kill",
"ec2-tag-compliance-remove",
],
)
self.assertEqual(
[p.name for p in collection.filter(resource_types=["ec2", "s3"])],
[p.name for p in collection],
)
self.assertEqual(
[p.name for p in collection.filter(["ec2*", "s3*"], ["ec2", "s3"])],
[p.name for p in collection],
)
self.assertEqual(
[p.name for p in collection.filter(["ec2*", "s3*"], ["s3"])],
[
"s3-remediate",
"s3-global-grants",
],
)
self.assertEqual(
[p.name for p in collection.filter(["asdf12"])],
[],
)
self.assertEqual(
[p.name for p in collection.filter(resource_types=["asdf12"])],
[],
)
def test_file_not_found(self):
self.assertRaises(IOError, policy.load, Config.empty(), "/asdf12")
def test_policy_resource_limits(self):
session_factory = self.replay_flight_data(
"test_policy_resource_limits")
p = self.load_policy(
{
"name": "log-delete",
"resource": "log-group",
"max-resources-percent": 2.5,
},
session_factory=session_factory)
p.ctx.metrics.flush = mock.MagicMock()
output = self.capture_logging('custodian.policy', level=logging.ERROR)
self.assertRaises(ResourceLimitExceeded, p.run)
self.assertEqual(
output.getvalue().strip(),
"policy:log-delete exceeded resource-limit:2.5% found:1 total:1")
self.assertEqual(
p.ctx.metrics.buf[0]['MetricName'], 'ResourceLimitExceeded')
def test_policy_resource_limits_count(self):
session_factory = self.replay_flight_data(
"test_policy_resource_count")
p = self.load_policy(
{
"name": "ecs-cluster-resource-count",
"resource": "ecs",
"max-resources": 1
},
session_factory=session_factory)
self.assertRaises(ResourceLimitExceeded, p.run)
policy = {
"name": "ecs-cluster-resource-count",
"resource": "ecs",
"max-resources": 0
}
config = Config.empty(validate=True)
self.assertRaises(
Exception,
self.load_policy,
policy,
config=config,
validate=True,
session_factory=session_factory
)
def test_policy_resource_limit_and_percent(self):
session_factory = self.replay_flight_data(
"test_policy_resource_count")
p = self.load_policy(
{
"name": "ecs-cluster-resource-count",
"resource": "ecs",
"max-resources": {
"amount": 1,
"percent": 10,
"op": "and"
}
},
session_factory=session_factory)
self.assertRaises(ResourceLimitExceeded, p.run)
p = self.load_policy(
{
"name": "ecs-cluster-resource-count",
"resource": "ecs",
"max-resources": {
"amount": 100,
"percent": 10,
"op": "and"
}
},
session_factory=session_factory)
resources = p.run()
self.assertTrue(resources)
def test_policy_resource_limits_with_filter(self):
session_factory = self.replay_flight_data(
"test_policy_resource_count_with_filter")
p = self.load_policy(
{
"name": "asg-with-image-age-resource-count",
"resource": "asg",
"max-resources": 1,
"filters": [{
"type": "image-age",
"op": "ge",
"days": 0
}]
},
session_factory=session_factory)
resources = p.run()
self.assertTrue(resources)
def test_get_resource_manager(self):
collection = self.load_policy_set(
{
"policies": [
{
"name": "query-instances",
"resource": "ec2",
"filters": [{"tag-key": "CMDBEnvironment"}],
}
]
}
)
p = collection.policies[0]
self.assertTrue(isinstance(p.load_resource_manager(), EC2))
def xtest_policy_run(self):
manager.resources.register("dummy", DummyResource)
self.addCleanup(manager.resources.unregister, "dummy")
self.output_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.output_dir)
collection = self.load_policy_set(
{"policies": [{"name": "process-instances", "resource": "dummy"}]},
{"output_dir": self.output_dir},
)
p = collection.policies[0]
p()
self.assertEqual(len(p.ctx.metrics.data), 3)
def test_validate_policy_start_stop(self):
data = {
'name': 'bad-str-parse',
'resource': 'ec2',
'start': 'asdf'
}
with self.assertRaises(ValueError):
self.load_policy(data)
data = {
'name': 'bad-non-str-parse',
'resource': 'ec2',
'start': 2
}
with self.assertRaises(Exception):
self.load_policy(data)
data = {
'name': 'bad-tz-parse',
'resource': 'ec2',
'tz': 'asdf'
}
with self.assertRaises(PolicyValidationError):
self.load_policy(data)
data = {
'name': 'bad-tz-int-parse',
'resource': 'ec2',
'tz': 2
}
with self.assertRaises(Exception):
self.load_policy(data)
data = {
'name': 'good-time-parse',
'resource': 'ec2',
'start': '4 AM'
}
p = self.load_policy(data)
result = p.validate_policy_start_stop()
self.assertEqual(result, None)
data = {
'name': 'good-tz-str-parse',
'resource': 'ec2',
'tz': 'UTC'
}
p = self.load_policy(data)
result = p.validate_policy_start_stop()
self.assertEqual(result, None)
class PolicyConditionsTest(BaseTest):
def test_value_from(self):
tmp_dir = self.change_cwd()
p = self.load_policy({
'name': 'fx',
'resource': 'aws.ec2',
'conditions': [{
'type': 'value',
'key': 'account_id',
'op': 'in',
'value_from': {
'url': 'file:///{}/accounts.txt'.format(tmp_dir),
'type': 'txt'}
}]
})
with open(os.path.join(tmp_dir, 'accounts.txt'), 'w') as fh:
fh.write(p.ctx.options.account_id)
self.assertTrue(p.is_runnable())
def test_env_var_extension(self):
p = self.load_policy({
'name': 'profx',
'resource': 'aws.ec2',
'conditions': [{
'type': 'value',
'key': 'account.name',
'value': 'deputy'}]})
p.conditions.env_vars['account'] = {'name': 'deputy'}
self.assertTrue(p.is_runnable())
p.conditions.env_vars['account'] = {'name': 'mickey'}
self.assertFalse(p.is_runnable())
def test_event_filter(self):
p = self.load_policy({
'name': 'profx',
'resource': 'aws.ec2',
'conditions': [{
'type': 'event',
'key': 'detail.userIdentity.userName',
'value': 'deputy'}]})
self.assertTrue(
p.conditions.evaluate(
{'detail': {'userIdentity': {'userName': 'deputy'}}}))
# event filters pass if we don't have an event.
self.assertTrue(p.is_runnable())
self.assertFalse(p.is_runnable({}))
self.assertFalse(p.is_runnable(
{'detail': {'userIdentity': {'userName': 'mike'}}}))
def test_boolean_or_blocks(self):
p = self.load_policy({
'name': 'magenta',
'resource': 'aws.codebuild',
'conditions': [{
'or': [
{'region': 'us-east-1'},
{'region': 'us-west-2'}]}]})
self.assertTrue(p.is_runnable())
def test_boolean_and_blocks(self):
p = self.load_policy({
'name': 'magenta',
'resource': 'aws.codebuild',
'conditions': [{
'and': [
{'region': 'us-east-1'},
{'region': 'us-west-2'}]}]})
self.assertFalse(p.is_runnable())
def test_boolean_not_blocks(self):
p = self.load_policy({
'name': 'magenta',
'resource': 'aws.codebuild',
'conditions': [{
'not': [
{'region': 'us-east-1'}]}]})
self.assertFalse(p.is_runnable())
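# The or/and/not blocks above are evaluated against the policy's execution
# context; the default test region is assumed to be us-east-1 here, so the
# 'or' block matches, the 'and' block cannot, and 'not' inverts the match.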
def test_dryrun_event_filter(self):
pdata = {
'name': 'manga',
'resource': 'aws.ec2',
'mode': {
'type': 'config-rule',
'role': 'something'
},
'filters': [{
'not': [
{'type': 'event'}
]
}]
}
self.patch(PullMode, 'run', lambda self: [True])
p = self.load_policy(
deepcopy(pdata), config={'dryrun': True})
results = p.run()
self.assertEqual(results, [True])
self.assertTrue(p.is_runnable())
self.assertEqual(pdata, p.data)
def test_boolean_not_event(self):
# the event is bound to execution-time evaluation; when
# evaluating conditions for provisioning we strip any
# event filters.
pdata = {
'name': 'manga',
'resource': 'aws.ec2',
'conditions': [{
'or': [
{'not': [
{'type': 'event'}]}]}]}
p = self.load_policy(pdata)
p._trim_runtime_filters()
self.assertTrue(p.is_runnable())
self.assertFalse(p.conditions.filters)
self.assertEqual(p.data, pdata)
class PolicyExecutionModeTest(BaseTest):
def test_run_unimplemented(self):
self.assertRaises(NotImplementedError, policy.PolicyExecutionMode({}).run)
def test_get_logs_unimplemented(self):
self.assertRaises(
NotImplementedError, policy.PolicyExecutionMode({}).get_logs, 1, 2
)
class LambdaModeTest(BaseTest):
def test_tags_validation(self):
log_file = self.capture_logging('c7n.policy', level=logging.INFO)
self.load_policy({
'name': 'foobar',
'resource': 'aws.ec2',
'mode': {
'type': 'config-rule',
'tags': {
'custodian-mode': 'xyz',
'xyz': 'bar'}
}},
validate=True)
lines = log_file.getvalue().strip().split('\n')
self.assertEqual(
lines[0],
('Custodian reserves policy lambda tags starting with '
'custodian - policy specifies custodian-mode'))
def test_tags_injection(self):
p = self.load_policy({
'name': 'foobar',
'resource': 'aws.ec2',
'mode': {
'type': 'config-rule',
'tags': {
'xyz': 'bar'}
}},
validate=True)
from c7n import mu
policy_lambda = []
def publish(self, func, alias=None, role=None, s3_uri=None):
policy_lambda.append(func)
self.patch(mu.LambdaManager, 'publish', publish)
p.provision()
self.assertEqual(
policy_lambda[0].tags['custodian-info'],
'mode=config-rule:version=%s' % version)
class PullModeTest(BaseTest):
def test_skip_when_region_not_equal(self):
log_file = self.capture_logging("custodian.policy")
policy_name = "rds-test-policy"
p = self.load_policy(
{
"name": policy_name,
"resource": "rds",
"region": "us-east-1",
"filters": [{"type": "default-vpc"}],
},
config={"region": "us-west-2"},
session_factory=None,
)
p.run()
lines = log_file.getvalue().strip().split("\n")
self.assertIn(
"Skipping policy:{} due to execution conditions".format(
policy_name
),
lines,
)
def test_is_runnable_mismatch_region(self):
p = self.load_policy(
{'name': 'region-mismatch',
'resource': 'ec2',
'region': 'us-east-1'},
config={'region': 'us-west-2', 'validate': True},
session_factory=None)
self.assertEqual(p.is_runnable(), False)
def test_is_runnable_dates(self):
p = self.load_policy(
{'name': 'good-start-date',
'resource': 'ec2',
'tz': 'UTC',
'start': '2018-3-29'},
config={'validate': True},
session_factory=None)
self.assertEqual(p.is_runnable(), True)
tomorrow_date = str(datetime.date(datetime.now()) + timedelta(days=1))
p = self.load_policy(
{'name': 'bad-start-date',
'resource': 'ec2',
'tz': 'UTC',
'start': tomorrow_date},
config={'validate': True},
session_factory=None)
self.assertEqual(p.is_runnable(), False)
p = self.load_policy(
{'name': 'good-end-date',
'resource': 'ec2',
'tz': 'UTC',
'end': tomorrow_date},
config={'validate': True},
session_factory=None)
self.assertEqual(p.is_runnable(), True)
p = self.load_policy(
{'name': 'bad-end-date',
'resource': 'ec2',
'tz': 'UTC',
'end': '2018-3-29'},
config={'validate': True},
session_factory=None)
self.assertEqual(p.is_runnable(), False)
p = self.load_policy(
{'name': 'bad-start-end-date',
'resource': 'ec2',
'tz': 'UTC',
'start': '2018-3-28',
'end': '2018-3-29'},
config={'validate': True},
session_factory=None)
self.assertEqual(p.is_runnable(), False)
def test_is_runnable_parse_dates(self):
p = self.load_policy(
{'name': 'parse-date-policy',
'resource': 'ec2',
'tz': 'UTC',
'start': 'March 3 2018'},
config={'validate': True},
session_factory=None)
self.assertEqual(p.is_runnable(), True)
p = self.load_policy(
{'name': 'parse-date-policy',
'resource': 'ec2',
'tz': 'UTC',
'start': 'March 3rd 2018'},
config={'validate': True},
session_factory=None)
self.assertEqual(p.is_runnable(), True)
p = self.load_policy(
{'name': 'parse-date-policy',
'resource': 'ec2',
'tz': 'UTC',
'start': '28 March 2018'},
config={'validate': True},
session_factory=None)
self.assertEqual(p.is_runnable(), True)
class PhdModeTest(BaseTest):
def test_validation(self):
self.assertRaises(
PolicyValidationError,
self.load_policy,
{'name': 'xyz', 'resource': 'ec2',
'mode': {'type': 'phd'}})
self.load_policy(
{'name': 'abc', 'resource': 'account',
'mode': {'type': 'phd'}})
class ConfigModeTest(BaseTest):
def test_config_poll(self):
factory = self.replay_flight_data('test_config_poll_rule_evaluation')
cmock = mock.MagicMock()
requests = []
def record_requests(Evaluations, ResultToken):
requests.append(Evaluations)
cmock.put_evaluations.side_effect = record_requests
cmock.put_evaluations.return_value = {}
self.patch(
ConfigPollRuleMode, '_get_client', lambda self: cmock)
p = self.load_policy({
'name': 'kin-poll',
'resource': 'aws.kinesis',
'filters': [{'tag:App': 'Dev'}],
'mode': {
'type': 'config-poll-rule',
'schedule': 'Three_Hours'}},
session_factory=factory)
event = event_data('poll-evaluation.json', 'config')
results = p.push(event, None)
self.assertEqual(results, ['dev2'])
self.assertEqual(
requests,
[[{'Annotation': 'The resource is not compliant with policy:kin-poll.',
'ComplianceResourceId': 'dev2',
'ComplianceResourceType': 'AWS::Kinesis::Stream',
'ComplianceType': 'NON_COMPLIANT',
'OrderingTimestamp': '2020-05-03T13:55:44.576Z'}],
[{'Annotation': 'The resource is compliant with policy:kin-poll.',
'ComplianceResourceId': 'dev1',
'ComplianceResourceType': 'AWS::Kinesis::Stream',
'ComplianceType': 'COMPLIANT',
'OrderingTimestamp': '2020-05-03T13:55:44.576Z'}]])
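# The flow exercised above: push() evaluates the recorded kinesis streams
# against the policy filters, returns the matching resource ('dev2'), and
# reports a NON_COMPLIANT evaluation for dev2 and a COMPLIANT one for dev1
# through put_evaluations on the mocked config client.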
class GuardModeTest(BaseTest):
def test_unsupported_resource(self):
self.assertRaises(
ValueError,
self.load_policy,
{"name": "vpc", "resource": "vpc", "mode": {"type": "guard-duty"}},
validate=True,
)
def test_lambda_policy_validate_name(self):
name = "ec2-instance-guard-D8488F01-0E3E-4772-A3CB-E66EEBB9BDF4"
with self.assertRaises(PolicyValidationError) as e_cm:
self.load_policy(
{"name": name,
"resource": "ec2",
"mode": {"type": "guard-duty"}},
validate=True)
self.assertTrue("max length with prefix" in str(e_cm.exception))
@mock.patch("c7n.mu.LambdaManager.publish")
def test_ec2_guard_event_pattern(self, publish):
def assert_publish(policy_lambda, role):
events = policy_lambda.get_events(mock.MagicMock())
self.assertEqual(len(events), 1)
pattern = json.loads(events[0].render_event_pattern())
expected = {
"source": ["aws.guardduty"],
"detail": {"resource": {"resourceType": ["Instance"]}},
"detail-type": ["GuardDuty Finding"],
}
self.assertEqual(pattern, expected)
publish.side_effect = assert_publish
p = self.load_policy(
{
"name": "ec2-instance-guard",
"resource": "ec2",
"mode": {"type": "guard-duty"},
}
)
p.run()
@mock.patch("c7n.mu.LambdaManager.publish")
def test_iam_guard_event_pattern(self, publish):
def assert_publish(policy_lambda, role):
events = policy_lambda.get_events(mock.MagicMock())
self.assertEqual(len(events), 1)
pattern = json.loads(events[0].render_event_pattern())
expected = {
"source": ["aws.guardduty"],
"detail": {"resource": {"resourceType": ["AccessKey"]}},
"detail-type": ["GuardDuty Finding"],
}
self.assertEqual(pattern, expected)
publish.side_effect = assert_publish
p = self.load_policy(
{
"name": "iam-user-guard",
"resource": "iam-user",
"mode": {"type": "guard-duty"},
}
)
p.run()
@mock.patch("c7n.query.QueryResourceManager.get_resources")
def test_ec2_instance_guard(self, get_resources):
def instances(ids, cache=False):
return [{"InstanceId": ids[0]}]
get_resources.side_effect = instances
p = self.load_policy(
{
"name": "ec2-instance-guard",
"resource": "ec2",
"mode": {"type": "guard-duty"},
}
)
event = event_data("ec2-duty-event.json")
results = p.push(event, None)
self.assertEqual(results, [{"InstanceId": "i-99999999"}])
@mock.patch("c7n.query.QueryResourceManager.get_resources")
def test_iam_user_access_key_annotate(self, get_resources):
def users(ids, cache=False):
return [{"UserName": ids[0]}]
get_resources.side_effect = users
p = self.load_policy(
{
"name": "user-key-guard",
"resource": "iam-user",
"mode": {"type": "guard-duty"},
}
)
event = event_data("iam-duty-event.json")
results = p.push(event, None)
self.assertEqual(
results,
[
{
u"UserName": u"GeneratedFindingUserName",
u"c7n:AccessKeys": {u"AccessKeyId": u"GeneratedFindingAccessKeyId"},
}
],
)
|
|
import logging
import re
from django.conf import settings
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http.response import HttpResponse
from django.utils.translation import ugettext as _
from oscar.core.loading import get_class
from rest_framework import exceptions
from rest_framework import authentication
from oscarapi.basket.operations import (
request_contains_basket,
store_basket_in_session,
get_basket
)
from oscarapi.utils import (
get_domain,
session_id_from_parsed_session_uri,
get_session
)
from oscarapi import models
BasketMiddleware = get_class('basket.middleware', 'BasketMiddleware')
logger = logging.getLogger(__name__)
HTTP_SESSION_ID_REGEX = re.compile(
r'^SID:(?P<type>(?:ANON|AUTH)):(?P<realm>.*?):(?P<session_id>.+?)(?:[-:][0-9a-fA-F]+){0,2}$')
def parse_session_id(request):
"""
Parse a session id from the request.
>>> class request:
... META = {'HTTP_SESSION_ID': None}
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:example.com:987171879'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '987171879'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:AUTH:example.com:987171879'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '987171879'), ('type', 'AUTH')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:example.com:987171879-16EF'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '987171879'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:example.com:98717-16EF:100'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '98717'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON::987171879'
>>> sorted(parse_session_id(request).items())
[('realm', ''), ('session_id', '987171879'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:example.com:923-thread1'
>>> sorted(parse_session_id(request).items())
[('realm', 'example.com'), ('session_id', '923-thread1'), ('type', 'ANON')]
>>>
>>> request.META['HTTP_SESSION_ID'] = 'SID:BULLSHIT:example.com:987171879'
>>> parse_session_id(request)
>>> request.META['HTTP_SESSION_ID'] = 'ENTIREGABRBAGE'
>>> parse_session_id(request)
>>> request.META['HTTP_SESSION_ID'] = 'SID:ANON:987171879'
>>> parse_session_id(request)
"""
unparsed_session_id = request.META.get('HTTP_SESSION_ID', None)
if unparsed_session_id is not None:
parsed_session_id = HTTP_SESSION_ID_REGEX.match(unparsed_session_id)
if parsed_session_id is not None:
return parsed_session_id.groupdict()
return None
def start_or_resume(session_id, session_type):
if session_type == 'ANON':
return get_session(session_id, raise_on_create=False)
return get_session(session_id, raise_on_create=True)
class IsApiRequest(object):
@staticmethod
def is_api_request(request):
path = request.path.lower()
api_root = reverse('api-root').lower()
return path.startswith(api_root)
class HeaderSessionMiddleware(SessionMiddleware, IsApiRequest):
"""
Implement session through headers:
http://www.w3.org/TR/WD-session-id
TODO:
Implement gateway protection, with permission options for usage of
header sessions. With that in place the api can be used for both trusted
and non trusted clients, see README.rst.
"""
def process_request(self, request):
"""
Parse the session id from the 'Session-Id: ' header when using the api.
"""
if self.is_api_request(request):
try:
parsed_session_uri = parse_session_id(request)
if parsed_session_uri is not None:
domain = get_domain(request)
if parsed_session_uri['realm'] != domain:
raise exceptions.NotAcceptable(
_('Can not accept cookie with realm %s on realm %s') % (
parsed_session_uri['realm'],
domain
)
)
session_id = session_id_from_parsed_session_uri(
parsed_session_uri)
request.session = start_or_resume(
session_id, session_type=parsed_session_uri['type'])
request.parsed_session_uri = parsed_session_uri
# since the session id is assigned by the CLIENT, there is
# no point in having csrf_protection. Session id's read
# from cookies, still need csrf!
request.csrf_processing_done = True
return None
except exceptions.APIException as e:
response = HttpResponse('{"reason": "%s"}' % e.detail,
content_type='application/json')
response.status_code = e.status_code
return response
return super(HeaderSessionMiddleware, self).process_request(request)
def process_response(self, request, response):
"""
Add the 'Session-Id: ' header when using the api.
"""
if self.is_api_request(request) \
and getattr(request, 'session', None) is not None \
and hasattr(request, 'parsed_session_uri'):
session_key = request.session.session_key
parsed_session_key = session_id_from_parsed_session_uri(
request.parsed_session_uri)
assert(session_key == parsed_session_key), \
'%s is not equal to %s' % (session_key, parsed_session_key)
response['Session-Id'] = \
'SID:%(type)s:%(realm)s:%(session_id)s' % (
request.parsed_session_uri)
return super(HeaderSessionMiddleware, self).process_response(
request, response)
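# Illustrative sketch (not part of the middleware): how a non-web client could
# talk to the API using the 'Session-Id' header handled above. The `requests`
# library, the URL and the realm value are assumptions for illustration only.
def _example_header_session_request():
    import requests  # assumed to be available in the client environment
    headers = {'Session-Id': 'SID:ANON:example.com:987171879'}
    # The realm part must match the domain the API is served from; otherwise
    # HeaderSessionMiddleware answers with a JSON error payload.
    response = requests.get('http://example.com/api/', headers=headers)
    # The response carries the (possibly newly created) session back in the
    # 'Session-Id' response header, in the same SID:<type>:<realm>:<id> format.
    return response.headers.get('Session-Id')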
class ApiGatewayMiddleWare(IsApiRequest):
"""
Protect the api gateway with a token.
"""
def process_request(self, request):
if self.is_api_request(request):
key = authentication.get_authorization_header(request)
if models.ApiKey.objects.filter(key=key).exists():
return None
logger.error('Invalid credentials provided for %s:%s by %s' % (
request.method,
request.path,
request.META.get('REMOTE_ADDR', '<unknown>')
))
raise PermissionDenied()
return None
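# Illustrative sketch: granting a client access through ApiGatewayMiddleWare.
# The key value is an assumption; the middleware only checks that the raw
# Authorization header value matches an existing ApiKey.key.
def _example_api_gateway_key():
    # Server side: create a key once (for example from a shell or migration).
    api_key = models.ApiKey.objects.create(key='my-secret-gateway-token')
    # Client side: send the same value verbatim, e.g.
    #   curl -H 'Authorization: my-secret-gateway-token' http://example.com/api/
    return api_key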
class ApiBasketMiddleWare(BasketMiddleware, IsApiRequest):
"""
Use this middleware instead of Oscar's basket middleware if you
    want to mix the api with regular oscar views.
Oscar uses a cookie based session to store baskets for anonymous users, but
oscarapi can not do that, because we don't want to put the burden
of managing a cookie jar on oscarapi clients that are not websites.
"""
def process_request(self, request):
super(ApiBasketMiddleWare, self).process_request(request)
if self.is_api_request(request):
# we should make sure that any cookie baskets are turned into
# session baskets, since oscarapi uses only baskets from the
# session.
cookie_key = self.get_cookie_key(request)
basket = self.get_cookie_basket(
cookie_key,
request,
Exception("get_cookie_basket doesn't use the manager argument")
)
if basket is not None:
if request_contains_basket(request, basket):
pass
else:
store_basket_in_session(basket, request.session)
def process_response(self, request, response):
if self.is_api_request(request) and hasattr(request, 'user') and request.session:
# at this point we are sure a basket can be found in the session
# (if the session hasn't been destroyed by logging out),
# because it is enforced in process_request.
# We just have to make sure it is stored as a cookie, because it
# could have been created by oscarapi.
cookie_key = self.get_cookie_key(request)
basket = get_basket(request)
cookie = self.get_basket_hash(basket.id)
            # Delete any surplus cookies (use a separate loop variable so the
            # basket cookie_key above is not clobbered).
            cookies_to_delete = getattr(request, 'cookies_to_delete', [])
            for cookie_name in cookies_to_delete:
                response.delete_cookie(cookie_name)
if not request.user.is_authenticated():
response.set_cookie(
cookie_key, cookie,
max_age=settings.OSCAR_BASKET_COOKIE_LIFETIME,
secure=settings.OSCAR_BASKET_COOKIE_SECURE, httponly=True)
return response
else:
return super(
ApiBasketMiddleWare, self).process_response(request, response)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects and is also configurable by SparkContext's C{batchSize}
parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
from pyspark.util import _exception_message
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
"""
Return an iterator of deserialized batches (iterable) of objects from the input stream.
If the serializer does not operate on batches the default implementation returns an
iterator of single element lists.
"""
return map(lambda x: [x], self.load_stream(stream))
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("can not serialize object larger than 2G")
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
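# Illustrative sketch (assumed to be run manually, not used by Spark itself):
# the (length, data) framing of FramedSerializer, demonstrated with the
# PickleSerializer defined further below and an in-memory stream.
def _example_framed_roundtrip():
    import io
    buf = io.BytesIO()
    ser = PickleSerializer()
    # Each object is written as a 4-byte big-endian length followed by its bytes.
    ser.dump_stream([{'a': 1}, [1, 2, 3]], buf)
    buf.seek(0)
    return list(ser.load_stream(buf))  # -> [{'a': 1}, [1, 2, 3]]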
class ArrowStreamSerializer(Serializer):
"""
Serializes Arrow record batches as a stream.
"""
def dump_stream(self, iterator, stream):
import pyarrow as pa
writer = None
try:
for batch in iterator:
if writer is None:
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
yield batch
def __repr__(self):
return "ArrowStreamSerializer"
def _create_batch(series, timezone):
"""
Create an Arrow record batch from the given pandas.Series or list of Series, with optional type.
:param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
:param timezone: A timezone to respect when handling timestamp values
:return: Arrow RecordBatch
"""
import decimal
from distutils.version import LooseVersion
import pyarrow as pa
from pyspark.sql.types import _check_series_convert_timestamps_internal
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
def create_array(s, t):
mask = s.isnull()
# Ensure timestamp series are in expected form for Spark internal representation
# TODO: maybe don't need None check anymore as of Arrow 0.9.1
if t is not None and pa.types.is_timestamp(t):
s = _check_series_convert_timestamps_internal(s.fillna(0), timezone)
# TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2
return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False)
elif t is not None and pa.types.is_string(t) and sys.version < '3':
# TODO: need decode before converting to Arrow in Python 2
# TODO: don't need as of Arrow 0.9.1
return pa.Array.from_pandas(s.apply(
lambda v: v.decode("utf-8") if isinstance(v, str) else v), mask=mask, type=t)
elif t is not None and pa.types.is_decimal(t) and \
LooseVersion("0.9.0") <= LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
# TODO: see ARROW-2432. Remove when the minimum PyArrow version becomes 0.10.0.
return pa.Array.from_pandas(s.apply(
lambda v: decimal.Decimal('NaN') if v is None else v), mask=mask, type=t)
return pa.Array.from_pandas(s, mask=mask, type=t)
arrs = [create_array(s, t) for s, t in series]
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
class ArrowStreamPandasSerializer(Serializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
"""
def __init__(self, timezone):
super(ArrowStreamPandasSerializer, self).__init__()
self._timezone = timezone
def arrow_to_pandas(self, arrow_column):
from pyspark.sql.types import from_arrow_type, \
_check_series_convert_date, _check_series_localize_timestamps
s = arrow_column.to_pandas()
s = _check_series_convert_date(s, from_arrow_type(arrow_column.type))
s = _check_series_localize_timestamps(s, self._timezone)
return s
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
import pyarrow as pa
writer = None
try:
for series in iterator:
batch = _create_batch(series, self._timezone)
if writer is None:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
n = len(iterator)
for i in xrange(0, n, self.batchSize):
yield iterator[i: i + self.batchSize]
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
"""
    Serializes a stream of (key, values) pairs, splitting value lists that
    contain more than a certain number of objects so that the resulting
    batches have similar sizes.
"""
def __init__(self, serializer, batchSize=10):
BatchedSerializer.__init__(self, serializer, batchSize)
def _batched(self, iterator):
n = self.batchSize
for key, values in iterator:
for i in range(0, len(values), n):
yield key, values[i:i + n]
def load_stream(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
"""
    Choose the batch size automatically based on the size of the objects.
"""
def __init__(self, serializer, bestSize=1 << 16):
BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
self.bestSize = bestSize
def dump_stream(self, iterator, stream):
batch, best = 1, self.bestSize
iterator = iter(iterator)
while True:
vs = list(itertools.islice(iterator, batch))
if not vs:
break
bytes = self.serializer.dumps(vs)
write_int(len(bytes), stream)
stream.write(bytes)
size = len(bytes)
if size < best:
batch *= 2
elif size > best * 10 and batch > 1:
batch //= 2
def __repr__(self):
return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD cartesian,
we additionally need to do the cartesian within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# for correctness with repeated cartesian/zip this must be returned as one batch
yield product(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "CartesianDeserializer(%s, %s)" % \
(str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD zip,
we additionally need to do the zip within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
# instead of lists. We need to convert them to lists if needed.
key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
if len(key_batch) != len(val_batch):
raise ValueError("Can not deserialize PairRDD with different number of items"
" in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
# for correctness with repeated cartesian/zip this must be returned as one batch
yield zip(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
def loads(self, obj):
return obj
def dumps(self, obj):
return obj
# Hack namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
""" Hack namedtuple() to make it picklable """
# hijack only one time
if hasattr(collections.namedtuple, "__hijack"):
return
global _old_namedtuple # or it will put in closure
global _old_namedtuple_kwdefaults # or it will put in closure too
def _copy_func(f):
return types.FunctionType(f.__code__, f.__globals__, f.__name__,
f.__defaults__, f.__closure__)
def _kwdefaults(f):
# __kwdefaults__ contains the default values of keyword-only arguments which are
# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
# are as below:
#
# - Does not exist in Python 2.
# - Returns None in <= Python 3.5.x.
# - Returns a dictionary containing the default values to the keys from Python 3.6.x
# (See https://bugs.python.org/issue25628).
kargs = getattr(f, "__kwdefaults__", None)
if kargs is None:
return {}
else:
return kargs
_old_namedtuple = _copy_func(collections.namedtuple)
_old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
def namedtuple(*args, **kwargs):
for k, v in _old_namedtuple_kwdefaults.items():
kwargs[k] = kwargs.get(k, v)
cls = _old_namedtuple(*args, **kwargs)
return _hack_namedtuple(cls)
# replace namedtuple with the new one
collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
collections.namedtuple.__code__ = namedtuple.__code__
collections.namedtuple.__hijack = 1
# hack the cls already generated by namedtuple.
# Those created in other modules can be pickled as normal,
# so only hack those in __main__ module
for n, o in sys.modules["__main__"].__dict__.items():
if (type(o) is type and o.__base__ is tuple
and hasattr(o, "_fields")
and "__reduce__" not in o.__dict__):
_hack_namedtuple(o) # hack inplace
_hijack_namedtuple()
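# Illustrative sketch (assumed to be run manually): after _hijack_namedtuple(),
# even namedtuple classes created locally can be pickled, because their
# instances reduce to the module-level _restore() helper above.
def _example_namedtuple_pickling():
    Point = collections.namedtuple("Point", "x y")
    p = Point(1, 2)
    data = pickle.dumps(p, protocol)
    return pickle.loads(data)  # -> Point(x=1, y=2), no importable Point class needed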
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, protocol)
if sys.version >= '3':
def loads(self, obj, encoding="bytes"):
return pickle.loads(obj, encoding=encoding)
else:
def loads(self, obj, encoding=None):
return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj):
try:
return cloudpickle.dumps(obj, 2)
except pickle.PickleError:
raise
except Exception as e:
emsg = _exception_message(e)
if "'i' format requires" in emsg:
msg = "Object too large to serialize: %s" % emsg
else:
msg = "Could not serialize object: %s: %s" % (e.__class__.__name__, emsg)
cloudpickle.print_exec(sys.stderr)
raise pickle.PicklingError(msg)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
def dumps(self, obj):
return marshal.dumps(obj)
def loads(self, obj):
return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
"""
Choose marshal or pickle as serialization protocol automatically
"""
def __init__(self):
FramedSerializer.__init__(self)
self._type = None
def dumps(self, obj):
if self._type is not None:
return b'P' + pickle.dumps(obj, -1)
try:
return b'M' + marshal.dumps(obj)
except Exception:
self._type = b'P'
return b'P' + pickle.dumps(obj, -1)
def loads(self, obj):
        _type = obj[0:1]  # slice (not index) so the type tag is bytes on both Python 2 and 3
if _type == b'M':
return marshal.loads(obj[1:])
elif _type == b'P':
return pickle.loads(obj[1:])
else:
raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
"""
Compress the serialized data
"""
def __init__(self, serializer):
FramedSerializer.__init__(self)
assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
self.serializer = serializer
def dumps(self, obj):
return zlib.compress(self.serializer.dumps(obj), 1)
def loads(self, obj):
return self.serializer.loads(zlib.decompress(obj))
def __repr__(self):
return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def __init__(self, use_unicode=True):
self.use_unicode = use_unicode
def loads(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
s = stream.read(length)
return s.decode("utf-8") if self.use_unicode else s
def load_stream(self, stream):
try:
while True:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def __repr__(self):
return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
length = stream.read(8)
if not length:
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def read_bool(stream):
length = stream.read(1)
if not length:
raise EOFError
return struct.unpack("!?", length)[0]
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
class ChunkedStream(object):
"""
    This is a file-like object that takes a stream of data of unknown length and breaks it into
    fixed-length frames. The intended use case is serializing large data and sending it immediately over
a socket -- we do not want to buffer the entire data before sending it, but the receiving end
needs to know whether or not there is more data coming.
It works by buffering the incoming data in some fixed-size chunks. If the buffer is full, it
first sends the buffer size, then the data. This repeats as long as there is more data to send.
When this is closed, it sends the length of whatever data is in the buffer, then that data, and
finally a "length" of -1 to indicate the stream has completed.
"""
def __init__(self, wrapped, buffer_size):
self.buffer_size = buffer_size
self.buffer = bytearray(buffer_size)
self.current_pos = 0
self.wrapped = wrapped
def write(self, bytes):
byte_pos = 0
byte_remaining = len(bytes)
while byte_remaining > 0:
new_pos = byte_remaining + self.current_pos
if new_pos < self.buffer_size:
# just put it in our buffer
self.buffer[self.current_pos:new_pos] = bytes[byte_pos:]
self.current_pos = new_pos
byte_remaining = 0
else:
# fill the buffer, send the length then the contents, and start filling again
space_left = self.buffer_size - self.current_pos
new_byte_pos = byte_pos + space_left
self.buffer[self.current_pos:self.buffer_size] = bytes[byte_pos:new_byte_pos]
write_int(self.buffer_size, self.wrapped)
self.wrapped.write(self.buffer)
byte_remaining -= space_left
byte_pos = new_byte_pos
self.current_pos = 0
def close(self):
# if there is anything left in the buffer, write it out first
if self.current_pos > 0:
write_int(self.current_pos, self.wrapped)
self.wrapped.write(self.buffer[:self.current_pos])
# -1 length indicates to the receiving end that we're done.
write_int(-1, self.wrapped)
self.wrapped.close()
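# Illustrative sketch (assumed to be run manually): the frames produced by
# ChunkedStream can be decoded with read_int() -- each frame is a 4-byte length
# followed by that many bytes, and a length of -1 terminates the stream.
def _example_chunked_stream():
    import io

    class _Sink(io.BytesIO):
        def close(self):  # keep the buffer readable after ChunkedStream.close()
            pass

    sink = _Sink()
    out = ChunkedStream(sink, buffer_size=4)
    out.write(b"0123456789")
    out.close()  # flushes the remaining b"89" and writes the -1 terminator
    reader = io.BytesIO(sink.getvalue())
    frames = []
    while True:
        length = read_int(reader)
        if length == -1:
            break
        frames.append(reader.read(length))
    return frames  # -> [b'0123', b'4567', b'89']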
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
|
|
# Copyright (c) 2015-2016, 2018 Claudiu Popa <[email protected]>
# Copyright (c) 2015-2016 Ceridwen <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Nick Drozd <[email protected]>
# Copyright (c) 2018 Anthony Sottile <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
from astroid import bases
from astroid import context as contextmod
from astroid import exceptions
from astroid import nodes
from astroid import util
class CallSite:
"""Class for understanding arguments passed into a call site
It needs a call context, which contains the arguments and the
keyword arguments that were passed into a given call site.
In order to infer what an argument represents, call
:meth:`infer_argument` with the corresponding function node
and the argument name.
"""
def __init__(self, callcontext, argument_context_map=None):
if argument_context_map is None:
argument_context_map = {}
self.argument_context_map = argument_context_map
args = callcontext.args
keywords = callcontext.keywords
self.duplicated_keywords = set()
self._unpacked_args = self._unpack_args(args)
self._unpacked_kwargs = self._unpack_keywords(keywords)
self.positional_arguments = [
arg for arg in self._unpacked_args if arg is not util.Uninferable
]
self.keyword_arguments = {
key: value
for key, value in self._unpacked_kwargs.items()
if value is not util.Uninferable
}
@classmethod
def from_call(cls, call_node):
"""Get a CallSite object from the given Call node."""
callcontext = contextmod.CallContext(call_node.args, call_node.keywords)
return cls(callcontext)
def has_invalid_arguments(self):
"""Check if in the current CallSite were passed *invalid* arguments
This can mean multiple things. For instance, if an unpacking
of an invalid object was passed, then this method will return True.
Other cases can be when the arguments can't be inferred by astroid,
for example, by passing objects which aren't known statically.
"""
return len(self.positional_arguments) != len(self._unpacked_args)
def has_invalid_keywords(self):
"""Check if in the current CallSite were passed *invalid* keyword arguments
For instance, unpacking a dictionary with integer keys is invalid
(**{1:2}), because the keys must be strings, which will make this
method to return True. Other cases where this might return True if
objects which can't be inferred were passed.
"""
return len(self.keyword_arguments) != len(self._unpacked_kwargs)
def _unpack_keywords(self, keywords):
values = {}
context = contextmod.InferenceContext()
context.extra_context = self.argument_context_map
for name, value in keywords:
if name is None:
# Then it's an unpacking operation (**)
try:
inferred = next(value.infer(context=context))
except exceptions.InferenceError:
values[name] = util.Uninferable
continue
if not isinstance(inferred, nodes.Dict):
# Not something we can work with.
values[name] = util.Uninferable
continue
for dict_key, dict_value in inferred.items:
try:
dict_key = next(dict_key.infer(context=context))
except exceptions.InferenceError:
values[name] = util.Uninferable
continue
if not isinstance(dict_key, nodes.Const):
values[name] = util.Uninferable
continue
if not isinstance(dict_key.value, str):
values[name] = util.Uninferable
continue
if dict_key.value in values:
# The name is already in the dictionary
values[dict_key.value] = util.Uninferable
self.duplicated_keywords.add(dict_key.value)
continue
values[dict_key.value] = dict_value
else:
values[name] = value
return values
def _unpack_args(self, args):
values = []
context = contextmod.InferenceContext()
context.extra_context = self.argument_context_map
for arg in args:
if isinstance(arg, nodes.Starred):
try:
inferred = next(arg.value.infer(context=context))
except exceptions.InferenceError:
values.append(util.Uninferable)
continue
if inferred is util.Uninferable:
values.append(util.Uninferable)
continue
if not hasattr(inferred, "elts"):
values.append(util.Uninferable)
continue
values.extend(inferred.elts)
else:
values.append(arg)
return values
def infer_argument(self, funcnode, name, context):
"""infer a function argument value according to the call context
Arguments:
funcnode: The function being called.
name: The name of the argument whose value is being inferred.
context: Inference context object
"""
if name in self.duplicated_keywords:
raise exceptions.InferenceError(
"The arguments passed to {func!r} " " have duplicate keywords.",
call_site=self,
func=funcnode,
arg=name,
context=context,
)
# Look into the keywords first, maybe it's already there.
try:
return self.keyword_arguments[name].infer(context)
except KeyError:
pass
# Too many arguments given and no variable arguments.
if len(self.positional_arguments) > len(funcnode.args.args):
if not funcnode.args.vararg:
raise exceptions.InferenceError(
"Too many positional arguments "
"passed to {func!r} that does "
"not have *args.",
call_site=self,
func=funcnode,
arg=name,
context=context,
)
positional = self.positional_arguments[: len(funcnode.args.args)]
vararg = self.positional_arguments[len(funcnode.args.args) :]
argindex = funcnode.args.find_argname(name)[0]
kwonlyargs = {arg.name for arg in funcnode.args.kwonlyargs}
kwargs = {
key: value
for key, value in self.keyword_arguments.items()
if key not in kwonlyargs
}
# If there are too few positionals compared to
# what the function expects to receive, check to see
# if the missing positional arguments were passed
# as keyword arguments and if so, place them into the
# positional args list.
if len(positional) < len(funcnode.args.args):
for func_arg in funcnode.args.args:
if func_arg.name in kwargs:
arg = kwargs.pop(func_arg.name)
positional.append(arg)
if argindex is not None:
            # 1. first argument of instance/class method
if argindex == 0 and funcnode.type in ("method", "classmethod"):
if context.boundnode is not None:
boundnode = context.boundnode
else:
# XXX can do better ?
boundnode = funcnode.parent.frame()
if isinstance(boundnode, nodes.ClassDef):
# Verify that we're accessing a method
# of the metaclass through a class, as in
# `cls.metaclass_method`. In this case, the
# first argument is always the class.
method_scope = funcnode.parent.scope()
if method_scope is boundnode.metaclass():
return iter((boundnode,))
if funcnode.type == "method":
if not isinstance(boundnode, bases.Instance):
boundnode = bases.Instance(boundnode)
return iter((boundnode,))
if funcnode.type == "classmethod":
return iter((boundnode,))
# if we have a method, extract one position
# from the index, so we'll take in account
# the extra parameter represented by `self` or `cls`
if funcnode.type in ("method", "classmethod"):
argindex -= 1
# 2. search arg index
try:
return self.positional_arguments[argindex].infer(context)
except IndexError:
pass
if funcnode.args.kwarg == name:
# It wants all the keywords that were passed into
# the call site.
if self.has_invalid_keywords():
raise exceptions.InferenceError(
"Inference failed to find values for all keyword arguments "
"to {func!r}: {unpacked_kwargs!r} doesn't correspond to "
"{keyword_arguments!r}.",
keyword_arguments=self.keyword_arguments,
unpacked_kwargs=self._unpacked_kwargs,
call_site=self,
func=funcnode,
arg=name,
context=context,
)
kwarg = nodes.Dict(
lineno=funcnode.args.lineno,
col_offset=funcnode.args.col_offset,
parent=funcnode.args,
)
kwarg.postinit(
[(nodes.const_factory(key), value) for key, value in kwargs.items()]
)
return iter((kwarg,))
if funcnode.args.vararg == name:
# It wants all the args that were passed into
# the call site.
if self.has_invalid_arguments():
raise exceptions.InferenceError(
"Inference failed to find values for all positional "
"arguments to {func!r}: {unpacked_args!r} doesn't "
"correspond to {positional_arguments!r}.",
positional_arguments=self.positional_arguments,
unpacked_args=self._unpacked_args,
call_site=self,
func=funcnode,
arg=name,
context=context,
)
args = nodes.Tuple(
lineno=funcnode.args.lineno,
col_offset=funcnode.args.col_offset,
parent=funcnode.args,
)
args.postinit(vararg)
return iter((args,))
# Check if it's a default parameter.
try:
return funcnode.args.default_value(name).infer(context)
except exceptions.NoDefault:
pass
raise exceptions.InferenceError(
"No value found for argument {name} to " "{func!r}",
call_site=self,
func=funcnode,
arg=name,
context=context,
)
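# Illustrative sketch (assumed to be run manually against this astroid version):
# building a CallSite from source text and inspecting the unpacked arguments.
# The function name `f` and the literal arguments are placeholders.
def _example_call_site():
    from astroid import extract_node
    call = extract_node("f(1, 2, *[3, 4], kw='x', **{'other': 'y'})")
    site = CallSite.from_call(call)
    # Starred and double-starred arguments are unpacked where astroid can infer
    # them, so both collections below include the unpacked values as nodes.
    return site.positional_arguments, site.keyword_arguments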
|
|
""" Test functions for the sparse.linalg.eigen.lobpcg module
"""
from __future__ import division, print_function, absolute_import
import itertools
import platform
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
import pytest
from numpy import ones, r_, diag
from numpy.random import rand
from scipy.linalg import eig, eigh, toeplitz, orth
from scipy.sparse import spdiags, diags, eye
from scipy.sparse.linalg import eigs, LinearOperator
from scipy.sparse.linalg.eigen.lobpcg import lobpcg
def ElasticRod(n):
"""Build the matrices for the generalized eigenvalue problem of the
fixed-free elastic rod vibration model.
"""
L = 1.0
le = L/n
rho = 7.85e3
S = 1.e-4
E = 2.1e11
mass = rho*S*le/6.
k = E*S/le
A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
return A, B
def MikotaPair(n):
"""Build a pair of full diagonal matrices for the generalized eigenvalue
problem. The Mikota pair acts as a nice test since the eigenvalues are the
squares of the integers n, n=1,2,...
"""
x = np.arange(1, n+1)
B = diag(1./x)
y = np.arange(n-1, 0, -1)
z = np.arange(2*n-1, 0, -2)
A = diag(z)-diag(y, -1)-diag(y, 1)
return A, B
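# Illustrative sketch (assumed to be run manually): the generalized eigenvalues
# of the Mikota pair are the squares of the integers, which is what makes it a
# convenient reference problem for the lobpcg checks below.
def _example_mikota_eigenvalues():
    from scipy.linalg import eigh
    A, B = MikotaPair(6)
    w = eigh(A, B, eigvals_only=True)
    return np.round(w)  # -> [ 1.  4.  9. 16. 25. 36.]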
def compare_solutions(A, B, m):
"""Check eig vs. lobpcg consistency.
"""
n = A.shape[0]
np.random.seed(0)
V = rand(n, m)
X = orth(V)
eigvals, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False)
eigvals.sort()
w, _ = eig(A, b=B)
w.sort()
assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)
def test_Small():
A, B = ElasticRod(10)
compare_solutions(A, B, 10)
A, B = MikotaPair(10)
compare_solutions(A, B, 10)
def test_ElasticRod():
A, B = ElasticRod(100)
compare_solutions(A, B, 20)
def test_MikotaPair():
A, B = MikotaPair(100)
compare_solutions(A, B, 20)
def test_regression():
"""Check the eigenvalue of the identity matrix is one.
"""
# https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
n = 10
X = np.ones((n, 1))
A = np.identity(n)
w, _ = lobpcg(A, X)
assert_allclose(w, [1])
def test_diagonal():
"""Check for diagonal matrices.
"""
# This test was moved from '__main__' in lobpcg.py.
# Coincidentally or not, this is the same eigensystem
# required to reproduce arpack bug
# https://forge.scilab.org/p/arpack-ng/issues/1397/
# even using the same n=100.
np.random.seed(1234)
# The system of interest is of size n x n.
n = 100
# We care about only m eigenpairs.
m = 4
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A to be the diagonal matrix whose entries are 1..n
# and where B is chosen to be the identity matrix.
vals = np.arange(1, n+1, dtype=float)
A = diags([vals], [0], (n, n))
B = eye(n)
# Let the preconditioner M be the inverse of A.
M = diags([1./vals], [0], (n, n))
# Pick random initial vectors.
X = np.random.rand(n, m)
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors.
m_excluded = 3
Y = np.eye(n, m_excluded)
eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y, tol=1e-4, maxiter=40, largest=False)
assert_allclose(eigvals, np.arange(1+m_excluded, 1+m_excluded+m))
_check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
"""Check if the eigenvalue residual is small.
"""
mult_wV = np.multiply(w, V)
dot_MV = M.dot(V)
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
def _check_fiedler(n, p):
"""Check the Fiedler vector computation.
"""
# This is not necessarily the recommended way to find the Fiedler vector.
np.random.seed(1234)
col = np.zeros(n)
col[1] = 1
A = toeplitz(col)
D = np.diag(A.sum(axis=1))
L = D - A
# Compute the full eigendecomposition using tricks, e.g.
# http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
tmp = np.pi * np.arange(n) / n
analytic_w = 2 * (1 - np.cos(tmp))
analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
_check_eigen(L, analytic_w, analytic_V)
# Compute the full eigendecomposition using eigh.
eigh_w, eigh_V = eigh(L)
_check_eigen(L, eigh_w, eigh_V)
# Check that the first eigenvalue is near zero and that the rest agree.
assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
assert_allclose(eigh_w[1:], analytic_w[1:])
# Check small lobpcg eigenvalues.
X = analytic_V[:, :p]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
# Check large lobpcg eigenvalues.
X = analytic_V[:, -p:]
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
assert_equal(lobpcg_w.shape, (p,))
assert_equal(lobpcg_V.shape, (n, p))
_check_eigen(L, lobpcg_w, lobpcg_V)
assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
# Look for the Fiedler vector using good but not exactly correct guesses.
fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
X = np.vstack((np.ones(n), fiedler_guess)).T
lobpcg_w, _ = lobpcg(L, X, largest=False)
# Mathematically, the smaller eigenvalue should be zero
# and the larger should be the algebraic connectivity.
lobpcg_w = np.sort(lobpcg_w)
assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
def test_fiedler_small_8():
"""Check the dense workaround path for small matrices.
"""
# This triggers the dense path because 8 < 2*5.
_check_fiedler(8, 2)
def test_fiedler_large_12():
"""Check the dense workaround path avoided for non-small matrices.
"""
# This does not trigger the dense path, because 2*5 <= 12.
_check_fiedler(12, 2)
def test_hermitian():
"""Check complex-value Hermitian cases.
"""
np.random.seed(1234)
sizes = [3, 10, 50]
ks = [1, 3, 10, 50]
gens = [True, False]
for size, k, gen in itertools.product(sizes, ks, gens):
if k > size:
continue
H = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
H = 10 * np.eye(size) + H + H.T.conj()
X = np.random.rand(size, k)
if not gen:
B = np.eye(size)
w, v = lobpcg(H, X, maxiter=5000)
w0, _ = eigh(H)
else:
B = np.random.rand(size, size) + 1.j * np.random.rand(size, size)
B = 10 * np.eye(size) + B.dot(B.T.conj())
w, v = lobpcg(H, X, B, maxiter=5000, largest=False)
w0, _ = eigh(H, B)
for wx, vx in zip(w, v.T):
# Check eigenvector
assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
/ np.linalg.norm(H.dot(vx)),
0, atol=5e-4, rtol=0)
# Compare eigenvalues
j = np.argmin(abs(w0 - wx))
assert_allclose(wx, w0[j], rtol=1e-4)
# The n=5 case tests the alternative small matrix code path that uses eigh().
@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
def test_eigs_consistency(n, atol):
"""Check eigs vs. lobpcg consistency.
"""
vals = np.arange(1, n+1, dtype=np.float64)
A = spdiags(vals, 0, n, n)
np.random.seed(345678)
X = np.random.rand(n, 2)
lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
vals, _ = eigs(A, k=2)
_check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
def test_verbosity(tmpdir):
"""Check that nonzero verbosity level code runs.
"""
A, B = ElasticRod(100)
n = A.shape[0]
m = 20
np.random.seed(0)
V = rand(n, m)
X = orth(V)
_, _ = lobpcg(A, X, B=B, tol=1e-5, maxiter=30, largest=False,
verbosityLevel=9)
@pytest.mark.xfail(platform.machine() == 'ppc64le',
reason="fails on ppc64le")
def test_tolerance_float32():
"""Check lobpcg for attainable tolerance in float32.
"""
np.random.seed(1234)
n = 50
m = 3
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = np.random.randn(n, m)
X = X.astype(np.float32)
eigvals, _ = lobpcg(A, X, tol=1e-9, maxiter=50, verbosityLevel=0)
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-5)
def test_random_initial_float32():
"""Check lobpcg in float32 for specific initial.
"""
np.random.seed(3)
n = 50
m = 4
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = np.random.rand(n, m)
X = X.astype(np.float32)
eigvals, _ = lobpcg(A, X, tol=1e-3, maxiter=50, verbosityLevel=1)
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=1e-2)
def test_maxit_None():
"""Check lobpcg if maxit=None runs 20 iterations (the default)
by checking the size of the iteration history output, which should
be the number of iterations plus 2 (initial and final values).
"""
np.random.seed(1566950023)
n = 50
m = 4
vals = -np.arange(1, n + 1)
A = diags([vals], [0], (n, n))
A = A.astype(np.float32)
X = np.random.randn(n, m)
X = X.astype(np.float32)
_, _, l_h = lobpcg(A, X, tol=1e-8, maxiter=20, retLambdaHistory=True)
assert_allclose(np.shape(l_h)[0], 20+2)
@pytest.mark.slow
def test_diagonal_data_types():
"""Check lobpcg for diagonal matrices for all matrix types.
"""
np.random.seed(1234)
n = 50
m = 4
# Define the generalized eigenvalue problem Av = cBv
# where (c, v) is a generalized eigenpair,
# and where we choose A and B to be diagonal.
vals = np.arange(1, n + 1)
list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
for s_f in list_sparse_format:
As64 = diags([vals * vals], [0], (n, n), format=s_f)
As32 = As64.astype(np.float32)
Af64 = As64.toarray()
Af32 = Af64.astype(np.float32)
listA = [Af64, As64, Af32, As32]
Bs64 = diags([vals], [0], (n, n), format=s_f)
Bf64 = Bs64.toarray()
listB = [Bf64, Bs64]
# Define the preconditioner function as LinearOperator.
Ms64 = diags([1./vals], [0], (n, n), format=s_f)
def Ms64precond(x):
return Ms64 @ x
Ms64precondLO = LinearOperator(matvec=Ms64precond,
matmat=Ms64precond,
shape=(n, n), dtype=float)
Mf64 = Ms64.toarray()
def Mf64precond(x):
return Mf64 @ x
Mf64precondLO = LinearOperator(matvec=Mf64precond,
matmat=Mf64precond,
shape=(n, n), dtype=float)
Ms32 = Ms64.astype(np.float32)
def Ms32precond(x):
return Ms32 @ x
Ms32precondLO = LinearOperator(matvec=Ms32precond,
matmat=Ms32precond,
shape=(n, n), dtype=np.float32)
Mf32 = Ms32.toarray()
def Mf32precond(x):
return Mf32 @ x
Mf32precondLO = LinearOperator(matvec=Mf32precond,
matmat=Mf32precond,
shape=(n, n), dtype=np.float32)
listM = [None, Ms64precondLO, Mf64precondLO,
Ms32precondLO, Mf32precondLO]
# Setup matrix of the initial approximation to the eigenvectors
# (cannot be sparse array).
Xf64 = np.random.rand(n, m)
Xf32 = Xf64.astype(np.float32)
listX = [Xf64, Xf32]
# Require that the returned eigenvectors be in the orthogonal complement
# of the first few standard basis vectors (cannot be sparse array).
m_excluded = 3
Yf64 = np.eye(n, m_excluded, dtype=float)
Yf32 = np.eye(n, m_excluded, dtype=np.float32)
listY = [Yf64, Yf32]
for A, B, M, X, Y in itertools.product(listA, listB, listM, listX,
listY):
eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
maxiter=100, largest=False)
assert_allclose(eigvals,
np.arange(1 + m_excluded, 1 + m_excluded + m))
|
|
# -*- coding: utf-8 -*-
#
# pymunk documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 03 03:50:06 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# To allow readthedocs.org build documentation without the chipmunk library file
class Mock(object):
__package__ = 'pymunk'
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
MOCK_MODULES = ['pymunk._chipmunk', 'pymunk._chipmunk_ffi',
'pygame', 'pygame.locals', 'pygame.color'
]
class MockFinder():
def find_module(self, fullname, path=None):
if fullname in MOCK_MODULES:
return self
return None
def load_module(self, fullname):
return Mock()
sys.meta_path = [MockFinder()]
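# Illustrative sketch (assumed to be run manually in this build environment):
# the finder intercepts only the modules listed in MOCK_MODULES; everything
# else is left to the normal import machinery, and any attribute access on a
# Mock simply yields another Mock (or a dummy class for capitalized names).
def _example_mocked_import():
    finder = MockFinder()
    assert finder.find_module('pygame') is finder  # mocked
    assert finder.find_module('numpy') is None     # handled by the real importer
    return finder.load_module('pygame').mixer      # -> a Mock instance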
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [#'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'ext.autoexample']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pymunk'
copyright = u'2012, Victor Blomqvist'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
#version = '3.0'
# The full version, including alpha/beta/rc tags.
#release = '3.0.0'
import pymunk
version = pymunk.version
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_templates"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# #a0c0ff #b8d0ff #cbdcff #345aa6
# #eeeeee #000000
# 355F7C 6c9cbe 82a5be 113651
# color palette #a0c0ff #345aa6 #EEEEEE #000000
# color palette #355F7C #6c9cbe #EEEEEE #000000 (unused: #82a5be #113651 #354c5d)
html_theme_options = {
'relbarbgcolor': '#6c9cbe',
'sidebarbgcolor': '#EEEEEE',
'headbgcolor': '#EEEEEE',
'headtextcolor': '#355F7C',
'headlinkcolor': '#355F7C',
'footerbgcolor': '#355F7C',
'sidebartextcolor': '#000000',
'sidebarlinkcolor': '#355F7C',
'linkcolor': '#355F7C',
'visitedlinkcolor': '#355F7C',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "pymunk_logo_sphinx.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "pymunk_favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = "pymunk.css"
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'],
}
#html_sidebars = {'[rieta]*': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'],
# 'pymunk*': ['globaltoc.html', 'classtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pymunkdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pymunk.tex', u'pymunk Documentation',
u'Victor Blomqvist', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymunk', u'pymunk Documentation',
[u'Victor Blomqvist'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pymunk', u'pymunk Documentation',
u'Victor Blomqvist', 'pymunk', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
|
# Copyright (c) 2015-2021 Patricio Cubillos and contributors.
# mc3 is open-source software under the MIT license (see LICENSE).
__all__ = [
'ROOT',
'ignore_system_exit',
'parray',
'saveascii', 'loadascii',
'savebin', 'loadbin',
'isfile',
'burn',
'default_parnames',
]
import os
import sys
import functools
import numpy as np
from .log import Log
ROOT = os.path.realpath(os.path.dirname(__file__) + '/../..') + '/'
def ignore_system_exit(func):
"""Decorator to ignore SystemExit exceptions."""
@functools.wraps(func)
def new_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except SystemExit:
return None
return new_func
def parray(string):
"""
    Convert a string containing a list of white-space-separated (and/or
    newline-separated) values into a numpy array.
"""
if string == 'None':
return None
try: # If they can be converted into doubles, do it:
return np.asarray(string.split(), np.double)
    except ValueError:  # Else, return a string array:
return string.split()
def saveascii(data, filename, precision=8):
"""
Write (numeric) data to ASCII file.
Parameters
----------
data: 1D/2D numeric iterable (ndarray, list, tuple, or combination)
Data to be stored in file.
filename: String
        File where to store the data.
precision: Integer
Maximum number of significant digits of values.
Example
-------
>>> import numpy as np
>>> import mc3.utils as mu
>>> a = np.arange(4) * np.pi
>>> b = np.arange(4)
>>> c = np.logspace(0, 12, 4)
>>> outfile = 'delete.me'
>>> mu.saveascii([a,b,c], outfile)
>>> # This will produce this file:
>>> with open(outfile) as f:
>>> print(f.read())
0 0 1
3.1415927 1 10000
6.2831853 2 1e+08
9.424778 3 1e+12
"""
# Force it to be a 2D ndarray:
data = np.array(data, ndmin=2).T
# Save arrays to ASCII file:
with open(filename, 'w') as f:
for parvals in data:
f.write(' '.join(f'{v:9.{precision:d}g}' for v in parvals) + '\n')
def loadascii(filename):
"""
    Extract data from a file and store it in a 2D ndarray (all rows must
    have the same number of columns). Blank and comment lines are ignored.
Parameters
----------
filename: String
Name of file containing the data to read.
Returns
-------
array: 2D ndarray or list
See parameters description.
"""
# Open and read the file:
lines = []
    with open(filename, 'r') as f:
        for line in f:
            if not line.startswith('#') and line.strip() != '':
                lines.append(line)
# Count number of lines:
npars = len(lines)
# Extract values:
ncolumns = len(lines[0].split())
array = np.zeros((npars, ncolumns), np.double)
for i, line in enumerate(lines):
array[i] = line.strip().split()
array = np.transpose(array)
return array
def savebin(data, filename):
"""
Write data variables into a numpy npz file.
Parameters
----------
data: List of data objects
Data to be stored in file. Each array must have the same length.
filename: String
        File where to store the data.
Note
----
This wrapper around np.savez() preserves the data type of list and
    tuple variables when the file is read back with loadbin().
Example
-------
>>> import mc3.utils as mu
>>> import numpy as np
>>> # Save list of data variables to file:
>>> datafile = 'datafile.npz'
>>> indata = [np.arange(4), 'one', np.ones((2,2)), True, [42], (42, 42)]
>>> mu.savebin(indata, datafile)
>>> # Now load the file:
>>> outdata = mu.loadbin(datafile)
>>> for data in outdata:
>>> print(repr(data))
array([0, 1, 2, 3])
'one'
array([[ 1., 1.],
[ 1., 1.]])
True
[42]
(42, 42)
"""
# Get the number of elements to determine the key's fmt:
ndata = len(data)
fmt = len(str(ndata))
key = []
for i, datum in enumerate(data):
dkey = 'file{:{}d}'.format(i, fmt)
# Encode in the key if a variable is a list or tuple:
if isinstance(datum, list):
dkey += '_list'
elif isinstance(datum, tuple):
dkey += '_tuple'
elif isinstance(datum, str):
dkey += '_str'
elif isinstance(datum, bool):
dkey += '_bool'
key.append(dkey)
# Use a dictionary so savez() include the keys for each item:
d = dict(zip(key, data))
np.savez(filename, **d)
def loadbin(filename):
"""
Read a binary npz array, casting list and tuple variables into
their original data types.
Parameters
----------
filename: String
Path to file containing the data to be read.
Return
------
data: List
List of objects stored in the file.
Example
-------
See example in savebin().
"""
# Unpack data:
    npz = np.load(filename)
    # Cast back into the original types encoded in the keys by savebin():
    casts = {'list': list, 'tuple': tuple, 'str': str, 'bool': bool}
    data = []
    for key, val in sorted(npz.items()):
        data.append(val[()])
        # Check if val is a str, bool, list, or tuple:
        if '_' in key:
            data[-1] = casts[key[key.find('_') + 1:]](data[-1])
return data
def isfile(input, iname, log, dtype, unpack=True, not_none=False):
"""
Check if an input is a file name; if it is, read it.
    Generate error messages if there is a problem.
Parameters
----------
input: Iterable or String
The input variable.
iname: String
Input-variable name.
    log: Log instance
        Logging object used to report errors.
dtype: String
File data type, choose between 'bin' or 'ascii'.
unpack: Bool
If True, return the first element of a read file.
not_none: Bool
If True, throw an error if input is None.
"""
# Set the loading function depending on the data type:
if dtype == 'bin':
load = loadbin
elif dtype == 'ascii':
load = loadascii
else:
log.error(
f"Invalid data type '{dtype}', must be either 'bin' or 'ascii'.",
tracklev=-3)
# Check if the input is None, throw error if requested:
if input is None:
if not_none:
log.error(f"'{iname}' is a required argument.", tracklev=-3)
return None
# Check that it is an iterable:
if not np.iterable(input):
log.error(f'{iname} must be an iterable or a file name.', tracklev=-3)
# Check if it is a string, a string in a list, or an array:
if isinstance(input, str):
ifile = input
elif isinstance(input[0], str):
ifile = input[0]
else:
return input
# It is a file name:
if not os.path.isfile(ifile):
log.error(f"{iname} file '{ifile}' not found.", tracklev=-3)
if unpack: # Unpack (remove outer dimension) if necessary
return load(ifile)[0]
return load(ifile)
def burn(Zdict=None, burnin=None, Z=None, zchain=None, sort=True):
"""
Return a posterior distribution removing the burnin initial iterations
of each chain from the input distribution.
Parameters
----------
Zdict: dict
A dictionary (as in mc3's output) containing a posterior distribution
(Z) and number of iterations to burn (burnin).
burnin: Integer
Number of iterations to remove from the start of each chain.
If specified, it overrides value from Zdict.
Z: 2D float ndarray
Posterior distribution (of shape [nsamples,npars]) to consider
if Zdict is None.
zchain: 1D integer ndarray
        Chain indices for the samples in Z (used only if Zdict is None).
sort: Bool
If True, sort the outputs by chain index.
Returns
-------
posterior: 2D float ndarray
Burned posterior distribution.
zchain: 1D integer ndarray
Burned zchain array.
zmask: 1D integer ndarray
Indices that transform Z into posterior.
Examples
--------
>>> import mc3.utils as mu
>>> import numpy as np
>>> # Mock a posterior-distribution output:
>>> Z = np.expand_dims([0., 1, 10, 20, 30, 11, 31, 21, 12, 22, 32], axis=1)
>>> zchain = np.array([-1, -1, 0, 1, 2, 0, 2, 1, 0, 1, 2])
>>> Zdict = {'posterior':Z, 'zchain':zchain, 'burnin':1}
>>> # Simply apply burn() into the dict:
>>> posterior, zchain, zmask = mu.burn(Zdict)
>>> print(posterior[:,0])
[11. 12. 21. 22. 31. 32.]
>>> print(zchain)
[0 0 1 1 2 2]
>>> print(zmask)
[ 5 8 7 9 6 10]
>>> # Samples were sorted by chain index, but one can prevent with:
>>> posterior, zchain, zmask = mu.burn(Zdict, sort=False)
>>> print(posterior[:,0])
[11. 31. 21. 12. 22. 32.]
>>> # One can also override the burn-in samples:
>>> posterior, zchain, zmask = mu.burn(Zdict, burnin=0)
>>> print(posterior[:,0])
[10. 11. 12. 20. 21. 22. 30. 31. 32.]
>>> # Or apply directly to arrays:
>>> posterior, zchain, zmask = mu.burn(Z=Z, zchain=zchain, burnin=1)
>>> print(posterior[:,0])
[11. 12. 21. 22. 31. 32.]
"""
if Zdict is None and (Z is None or zchain is None or burnin is None):
raise ValueError(
'Need to input either Zdict or all three of burnin, Z, and zchain')
if Zdict is not None:
Z = Zdict['posterior']
zchain = Zdict['zchain']
if burnin is None:
burnin = Zdict['burnin']
mask = np.zeros_like(zchain, bool)
nchains = np.amax(zchain) + 1
for c in range(nchains):
mask[np.where(zchain == c)[0][burnin:]] = True
if sort:
zsort = np.lexsort([zchain])
zmask = zsort[np.where(mask[zsort])]
else:
zmask = np.where(mask)[0]
# Values accepted for posterior stats:
posterior = Z[zmask]
zchain = zchain[zmask]
return posterior, zchain, zmask
def default_parnames(npars):
"""
Create an array of parameter names with sequential indices.
Parameters
----------
npars: Integer
Number of parameters.
    Returns
-------
1D string ndarray of parameter names.
"""
namelen = len(str(npars))
return np.array([f'Param {i+1:0{namelen}d}' for i in range(npars)])
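# --- Hedged usage sketch (not part of the original module) ---
# A minimal round-trip demo of the helpers above.  The file names are
# throwaway examples; because of the relative ``from .log import Log`` the
# block only runs when the module is executed with ``python -m`` inside its
# package, not as a standalone script.
if __name__ == '__main__':
    a = np.arange(4) * np.pi
    b = np.logspace(0, 3, 4)
    # ASCII round trip: saveascii writes each input array as a column,
    # loadascii returns them back as rows of a 2D array:
    saveascii([a, b], 'demo_ascii.dat')
    print(loadascii('demo_ascii.dat'))
    # Binary round trip preserving container types (str and tuple here):
    savebin([a, 'a label', (1, 2)], 'demo_bin.npz')
    print(loadbin('demo_bin.npz'))
    # String parsing and default parameter names:
    print(parray('1.0 2.0 3.0'))
    print(default_parnames(3))
    os.remove('demo_ascii.dat')
    os.remove('demo_bin.npz')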
|
|
# $Id: pb.py 120 2008-04-10 17:54:14Z mp $
#
# Copyright (c) 2007-2008 ReThought Limited and Peloton Contributors
# All Rights Reserved
# See LICENSE for details
from twisted.internet import reactor
from twisted.spread import pb
from twisted.internet.error import CannotListenError
from peloton.adapters import AbstractPelotonAdapter
from peloton.coreio import PelotonRequestInterface
from peloton.coreio import PelotonInternodeInterface
from peloton.events import RemoteEventHandler
from peloton.exceptions import PelotonError
class PelotonPBAdapter(AbstractPelotonAdapter, pb.Root):
""" The primary client adapter for Peloton is the Python Twisted PB
RPC mechanism. This provides the most complete and sophisticated
interface to the Peloton grid. This adapter is just a gate-keeper though;
anything obtaining this must gain trust and obtain a Referenceable
through which real work can be done.
"""
def __init__(self, kernel):
AbstractPelotonAdapter.__init__(self, kernel, 'TwistedPB')
self.logger = self.kernel.logger
def start(self):
""" In this startup the adapter seeks to bind to a port. It obtains
the host/port to which to bind from the kernel profile, but it
may, according to whether the 'anyport' switch is set or not, seek an
alternative port should its chosen target be bound by another application.
"""
interface,port = self.kernel.settings.bind.split(':')
port = int(port)
svr = pb.PBServerFactory(self)
while True:
try:
self.connection = reactor.listenTCP(port, svr, interface=interface)
self.kernel.profile['bind']= "%s:%d" % (interface, port)
self.kernel.profile['bind_interface'] = interface
self.kernel.profile['bind_port'] = port
break
except CannotListenError:
if self.kernel.settings.anyport == True:
port += 1
else:
raise RuntimeError("Cannot bind to port %d" % port)
except Exception:
self.logger.exception("Could not connect %s" % self.adapterName)
self.logger.info("Bound to %s:%d" % (interface, port))
def _stopped(self, x):
""" Handler called when reactor has stopped listening to this
protocol's port."""
pass
def stop(self):
""" Close down this adapter. """
d = self.connection.stopListening()
d.addCallback(self._stopped)
def remote_registerPSC(self, token):
""" A remote PSC will call registerPSC with a token encrypted
with the domain key. Provided this decrypts we know the remote PSC is
        permitted to join this domain. The remotePSC is a remote instance of
PelotonGridAdapter which provides methods for inter-PSC work.
@todo: it may be that the token can be included in the remotePSC using
copyable type stuff.
"""
self.logger.info("RegisterPSC %s: ref returned with NO VALIDATION" % token)
ref = PelotonInternodeAdapter(self.kernel, token)
return ref
def remote_registerWorker(self, worker, token):
""" A worker registers by sending a KernelInterface
referenceable and a token. The token was passed to the worker
generator and is used simply to verify that this is indeed a valid
and wanted contact."""
self.logger.info("Starting worker, token=%s NOT VALIDATED" % token)
serviceName, publishedName, runtimeConfig = self.kernel.addWorker(worker, token)
        pwa = PelotonWorkerAdapter(serviceName, self, self.kernel)
worker.checkBeat = pwa.checkBeat
workerInfo = { 'pwa' : pwa,
'serviceName' : serviceName,
'publishedName' : publishedName,
'runtimeConfig' : runtimeConfig,
'loglevel' : self.kernel.settings.loglevel,
'logdir' : self.kernel.settings.logdir,
'servicePath' : self.kernel.settings.servicepath,
}
return workerInfo
def remote_login(self, clientObj):
""" Login to Peloton. The clientObj contains the credentials to be
used. Returns a PelotonClientAdapter"""
return PelotonClientAdapter(self.kernel, clientObj)
class PelotonInternodeAdapter(pb.Referenceable):
""" Used to call between PSCs. """
def __init__(self, kernel, peerGUID):
self.requestInterface = PelotonInternodeInterface(kernel)
self.logger = kernel.logger
self.peerGUID = peerGUID
self.kernel = kernel
def remote_relayCall(self, service, method, *args, **kwargs):
""" Relay a method call between PSCs. """
return self.requestInterface.public_relayCall(self.peerGUID, service, method, *args, **kwargs)
def remote_getInterface(self, name):
""" Return the named interface to a plugin. """
return self.kernel.getCallable(name)
class PelotonClientAdapter(pb.Referenceable):
""" Referenceable used by client to call methods on the PSC. """
def __init__(self, kernel, clientObj):
self.kernel = kernel
self.dispatcher = kernel.dispatcher
self.routingTable = kernel.routingTable
self.requestInterface = PelotonRequestInterface(kernel)
self.logger = kernel.logger
self.clientObj = clientObj
self.eventHandlers=[]
def remote_call(self, service, method, *args, **kwargs):
""" Make a call to the specified service.method and return the result."""
return self.requestInterface.public_call(self.clientObj, 'raw', service, method, args, kwargs)
def remote_post(self, service, method, *args, **kwargs):
""" Put a call on the call queue for later execution. Do not
return result to client; this call will execute regardless of what the
client does subsequently. """
raise NotImplementedError
def remote_postLater(self, delay_seconds, service, method, *args, **kwargs):
""" Post call onto the call queue after a delay of delay_seconds. """
raise NotImplementedError
def remote_postAt(self, dateTime, service, method, *args, **kwargs):
""" Post call onto the call queue at some future time. """
raise NotImplementedError
def remote_fireEvent(self, key, exchange='events', **kwargs):
""" Fire an event onto the bus. """
self.dispatcher.fireEvent(key, exchange, **kwargs)
def remote_register(self, key, handler, exchange='events'):
""" Register to receive events with the given handler. Handler
must be a Referenceable providing remote_eventReceived."""
handler = RemoteEventHandler(handler)
self.eventHandlers.append(handler)
self.dispatcher.register(key, handler, exchange)
def remote_deregister(self, handler):
""" De-register handler as a listener. """
for h in self.eventHandlers:
if h.remoteHandler == handler:
handler = h
break
else:
# no handler registered
self.logger.error("Attempt to de-register handler for event that is not registered.")
return
self.dispatcher.deregister(handler)
self.eventHandlers.remove(handler)
def remote_getPSCProfile(self, guid=None):
""" Returns the serialised profile for the referenced PSC or self if guid
is None. """
if not guid:
return repr(self.kernel.profile)
else:
try:
return repr(self.routingTable.pscByGUID[guid].profile)
except KeyError:
raise PelotonError("%s is unknown" % guid)
def remote_getRegisteredExchanges(self):
""" Return a list of event exchanges registered in the dispatcher. """
return self.dispatcher.getRegisteredExchanges()
class PelotonWorkerAdapter(pb.Referenceable):
""" Interface by which a worker may invoke actions on the kernel. """
def __init__(self, name, pscRef, kernel):
self.name = name
        # Each time the worker sends a heartbeat this resets to zero;
        # each time the PSC checks, it increments the value by one.
        # If the value exceeds a threshold (e.g. 5) the worker is
        # considered dead.
self.heartBeat = 0
self.kernel = kernel
self.pscRef = pscRef
self.eventHandlers = []
def remote_notifyClosedown(self):
""" Called when the worker is closing down. """
pass
def remote_fireEvent(self, key, exchange, **kwargs):
""" Fire an event onto the bus. """
self.kernel.dispatcher.fireEvent(key, exchange, **kwargs)
def remote_register(self, key, handler, exchange='events'):
""" Register to receive events with the given handler. Handler
must be a Referenceable providing remote_eventReceived."""
handler = RemoteEventHandler(handler)
self.eventHandlers.append(handler)
self.kernel.dispatcher.register(key, handler, exchange)
def remote_deregister(self, handler):
""" De-register handler as a listener. """
for h in self.eventHandlers:
if h.remoteHandler == handler:
handler = h
break
else:
# no handler registered
self.logger.error("Attempt to de-register handler for event that is not registered.")
return
self.kernel.dispatcher.deregister(handler)
self.eventHandlers.remove(handler)
def remote_heartBeat(self):
""" Called by the client to provide proof of life."""
self.heartBeat = 0
def checkBeat(self, threshold=5):
""" Called from the PSC to check whether the worker
        is OK. The heartBeat counter is incremented. If the counter
exceeds the threshold value (default 5) checkBeat returns False,
otherwise returns True. """
self.heartBeat += 1
return self.heartBeat <= threshold
def remote_serviceStartOK(self, version):
""" Called to indicate safe start of service requested. """
self.kernel.logger.info("Worker reports start OK for %s %s" % (self.name, version))
def remote_serviceStartFailed(self, ex):
""" Called with exception if service failed to start. """
self.kernel.logger.info("Worker reports start failed for %s : %s" % (self.name, ex))
|
|
"""
Block device listing
====================
Module for processing output of the ``lsblk`` command. Different information
is provided by the ``lsblk`` command depending upon the options. Parsers
included here are:
LSBlock - Command ``lsblk``
---------------------------
The ``LSBlock`` class parses output of the ``lsblk`` command with no options.
LSBlockPairs - Command ``lsblk -P -o [columns...]``
---------------------------------------------------
The ``LSBlockPairs`` class parses output of the ``lsblk -P -o [columns...]``
command.
These classes are based on ``BlockDevices``, which implements all of the
functionality except the parsing of command-specific information.
Information is stored in the attribute ``self.rows`` which is a ``list`` of
``BlockDevice`` objects.
Each ``BlockDevice`` object provides the functionality for one row of data from the
command output. Data in a ``BlockDevice`` object is accessible by multiple methods.
For example the NAME field can be accessed in the following four ways::
lsblk_info.rows[0].data['NAME']
lsblk_info.rows[0].NAME
lsblk_info.rows[0].name
lsblk_info.rows[0].get('NAME')
Sample output of the ``lsblk`` command looks like::
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 252:0 0 9G 0 disk
|-vda1 252:1 0 500M 0 part /boot
`-vda2 252:2 0 8.5G 0 part
|-rhel-root 253:0 0 7.6G 0 lvm /
|-rhel-swap 253:1 0 924M 0 lvm [SWAP]
sda 8:0 0 500G 0 disk
`-sda1 8:1 0 500G 0 part /data
Note the hierarchy demonstrated in the name column. For instance ``vda1`` and
``vda2`` are children of ``vda``. Likewise, ``rhel-root`` and ``rhel-swap``
are children of ``vda2``. This relationship is demonstrated in the
``PARENT_NAMES`` key, which is only present if the row is a *child* row. For
example, the ``PARENT_NAMES`` value for ``rhel-root`` will be ``['vda', 'vda2']``,
meaning that ``vda2`` is the immediate parent and ``vda`` is the parent of
``vda2``.
Also note that column names that are not valid Python property names have been
changed. For example ``MAJ:MIN`` has been changed to ``MAJ_MIN``.
Examples:
>>> lsblk_info = shared[LSBlock]
>>> lsblk_info
<insights.parsers.lsblk.LSBlock object at 0x7f1f6a422d50>
>>> lsblk_info.rows
[disk:vda,
part:vda1(/boot),
part:vda2,
lvm:rhel-root(/),
lvm:rhel-swap([SWAP]),
disk:sda,
part:sda1(/data)]
>>> lsblk_info.rows[0]
disk:vda
>>> lsblk_info.rows[0].data
{'READ_ONLY': False, 'NAME': 'vda', 'REMOVABLE': False, 'MAJ_MIN': '252:0',
'TYPE': 'disk', 'SIZE': '9G'}
>>> lsblk_info.rows[0].data['NAME']
'vda'
>>> lsblk_info.rows[0].NAME
'vda'
>>> lsblk_info.rows[0].name
'vda'
>>> lsblk_info.rows[0].data['MAJ_MIN']
'252:0'
>>> lsblk_info.rows[0].MAJ_MIN
'252:0'
>>> lsblk_info.rows[0].maj_min
'252:0'
>>> lsblk_info.rows[0].removable
False
>>> lsblk_info.rows[0].read_only
False
>>> lsblk_info.rows[2].data
{'READ_ONLY': False, 'PARENT_NAMES': ['vda'], 'NAME': 'vda2',
'REMOVABLE': False, 'MAJ_MIN': '252:2', 'TYPE': 'part', 'SIZE': '8.5G'}
>>> lsblk_info.rows[2].parent_names
['vda']
>>> lsblk_info.rows[3].parent_names
['vda', 'vda2']
>>> lsblk_info.device_data['vda'] # Access devices by name
'disk:vda'
>>> lsblk_info.search(NAME='vda2')
[{'READ_ONLY': False, 'PARENT_NAMES': ['vda'], 'NAME': 'vda2',
'REMOVABLE': False, 'MAJ_MIN': '252:2', 'TYPE': 'part', 'SIZE': '8.5G'}]
"""
import re
from .. import Parser, parser
from . import ParseException, keyword_search
from insights.specs import lsblk
from insights.specs import lsblk_pairs
MAX_GENERATIONS = 20
class BlockDevice(object):
"""Class to contain one line of ``lsblk`` command information.
Contains all of the fields for a single line of ``lsblk`` output.
    Attribute names are derived from the column names, except where the column
    name is not a valid Python identifier, such as ``MAJ:MIN``.
The ``get`` method is provided to access any value, including
those that are not valid names in Python. All other valid
names may be accessed as ``obj.column_name``.
"""
def __init__(self, data):
self.data = data
        for k, v in data.items():
k = re.sub(r'[-:\.]', "_", k)
setattr(self, k, v)
setattr(self, k.lower(), v)
def __contains__(self, item):
return hasattr(self, item)
def __eq__(self, other):
return self.data == other
def iteritems(self):
        return self.data.items()
def get(self, k, default=None):
"""Get any value by keyword (column) name."""
return self.data.get(k, default)
def __str__(self):
if 'TYPE' in self.data and 'MOUNTPOINT' in self.data:
return '{type}:{name}({mnt})'.format(
type=self.data['TYPE'], name=self.data['NAME'],
mnt=self.data['MOUNTPOINT']
)
else:
# As long as the regular expression in LsBlock works, we must end
# up with NAME and TYPE records here. In LSBlockPairs this is
# enforced with an explicit check.
return '{type}:{name}'.format(type=self.data['TYPE'], name=self.data['NAME'])
class BlockDevices(Parser):
"""Class to contain all information from ``lsblk`` command.
Output of the ``lsblk`` command is contained in this base
class. Data may be accessed via the iterator and each item
represents a row of output from the command in `dict` format.
Attributes:
rows (list of BlockDevice): List of ``BlockDevice`` objects for each
row of the input. Input column name matches key name except any
'-' is replaced with '_' and the following names are changed::
Column Name Key Name
MAJ:MIN MAJ_MIN
RM REMOVABLE
                RO              READ_ONLY
device_data (dict of BlockDevice): A dictionary of ``BlockDevice``
objects keyed on the 'NAME' column (e.g. ``sda`` or ``rhel-swap``)
"""
def __len__(self):
return len(self.rows)
def __iter__(self):
for row in self.rows:
yield row
def search(self, **kwargs):
"""
Returns a list of the block devices (in order) matching the given
criteria. Keys are searched for directly - see the
:py:func:`insights.parsers.keyword_search` utility function for more
details. If no search parameters are given, no rows are returned.
Keys need to be in all upper case, as they appear in the source data.
Examples:
>>> blockdevs.search(NAME='sda1')
            [{'NAME': '/dev/sda1', 'TYPE': 'disk', 'SIZE': '80G', ...}]
            >>> blockdevs.search(TYPE='lvm')
            [{'NAME': 'volgrp01-root', 'TYPE': 'lvm', 'SIZE': '15G', ...}...]
Arguments:
**kwargs (dict): Dictionary of key-value pairs to search for.
Returns:
(list): The list of mount points matching the given criteria.
"""
return keyword_search(self.rows, **kwargs)
@parser(lsblk)
class LSBlock(BlockDevices):
"""Parse output of the ``lsblk`` command.
The specific lsblk commands are ``/bin/lsblk`` and ``/usr/bin/lsblk``.
Typical content of the ``lsblk`` command output looks like::
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 80G 0 disk
|-sda1 8:1 0 256M 0 part /boot
`-sda2 8:2 0 79.8G 0 part
|-volgrp01-root (dm-0) 253:0 0 15G 0 lvm /
`-volgrp01-swap (dm-1) 253:1 0 8G 0 lvm [SWAP]
Note:
See the discussion of the key ``PARENT_NAMES`` above.
"""
def parse_content(self, content):
r = re.compile(r"([\s\|\`\-]*)(\S+.*) (\d+:\d+)\s+(\d)\s+(\d+(\.\d)?[A-Z])\s+(\d)\s+([a-z]+)(.*)")
device_list = []
parents = [None] * MAX_GENERATIONS
for line in content[1:]:
name_match = r.match(line)
generation = 0
if name_match and len(name_match.groups()) == 9:
device = {}
name = name_match.group(2).strip()
                generation = len(name_match.group(1)) // 2
parents[generation] = name
device['NAME'] = name
device['MAJ_MIN'] = name_match.group(3)
device['REMOVABLE'] = bool(int(name_match.group(4)))
device['SIZE'] = name_match.group(5)
device['READ_ONLY'] = bool(int(name_match.group(7)))
# TYPE is enforced by the regex, no need to check here
device['TYPE'] = name_match.group(8)
mountpoint = name_match.group(9).strip()
if len(mountpoint) > 0:
device['MOUNTPOINT'] = mountpoint
if generation > 0:
device['PARENT_NAMES'] = parents[:generation]
device_list.append(device)
self.rows = [BlockDevice(d) for d in device_list]
self.device_data = dict((dev.name, dev) for dev in self.rows)
@parser(lsblk_pairs)
class LSBlockPairs(BlockDevices):
"""Parse output of the ``lsblk -P -o`` command.
``lsblk`` command with ``-P -o`` options provides explicit selection of
output columns in keyword=value pairs.
The specific lsblk commands are ``/bin/lsblk -P -o column_names`` and
``/usr/bin/lsblk -P -o column_names``. Typical content of the ``lsblk``
command output looks like::
ALIGNMENT="0" DISC-ALN="0" DISC-GRAN="0B" DISC-MAX="0B" DISC-ZERO="0" \
FSTYPE="" GROUP="cdrom" KNAME="sr0" LABEL="" LOG-SEC="512" MAJ:MIN="11:0" \
MIN-IO="512" MODE="brw-rw----" MODEL="DVD+-RW DVD8801 " MOUNTPOINT="" \
NAME="sr0" OPT-IO="0" OWNER="root" PHY-SEC="512" RA="128" RM="1" RO="0" \
ROTA="1" RQ-SIZE="128" SCHED="cfq" SIZE="1024M" STATE="running" TYPE="rom" UUID=""
ALIGNMENT="0" DISC-ALN="0" DISC-GRAN="0B" DISC-MAX="0B" DISC-ZERO="0" \
FSTYPE="" GROUP="disk" KNAME="sda" LABEL="" LOG-SEC="512" MAJ:MIN="8:0" \
MIN-IO="512" MODE="brw-rw----" MODEL="WDC WD1600JS-75N" MOUNTPOINT="" \
NAME="sda" OPT-IO="0" OWNER="root" PHY-SEC="512" RA="128" RM="0" RO="0" \
ROTA="1" RQ-SIZE="128" SCHED="cfq" SIZE="149G" STATE="running" TYPE="disk" UUID=""
ALIGNMENT="0" DISC-ALN="0" DISC-GRAN="0B" DISC-MAX="0B" DISC-ZERO="0" \
FSTYPE="ext4" GROUP="disk" KNAME="sda1" LABEL="" LOG-SEC="512" MAJ:MIN="8:1" \
MIN-IO="512" MODE="brw-rw----" MODEL="" MOUNTPOINT="/boot" NAME="sda1" \
OPT-IO="0" OWNER="root" PHY-SEC="512" RA="128" RM="0" RO="0" ROTA="1" \
RQ-SIZE="128" SCHED="cfq" SIZE="500M" STATE="" TYPE="part" \
UUID="c7c4c016-8b00-4ded-bffb-5cc4719b7d45"
Attributes:
rows (list of BlockDevice): List of ``BlockDevice`` objects for each row of
the input. Input column name matches key name except that
any '-', ':', or '.' is replaced with
'_' and the following names are changed::
Column Name Key Name
RM removable
RO read_only
Note:
``PARENT_NAMES`` is not available as a key because it is not listed
in the ``LsBlockPairs`` output and cannot always be correctly
inferred from the other data present.
"""
def parse_content(self, content):
self.rows = []
for line in content:
d = dict((k, v) for k, v in re.findall(r'(\S+)=\"(.*?)\"\s?', line) if len(v) > 0)
def str2bool(s):
return bool(int(s))
if 'TYPE' not in d:
raise ParseException(
"TYPE not found in LsBlockPairs line '{l}'".format(l=line)
)
for original, replace, transform in [("RM", "REMOVABLE", str2bool),
("RO", "READ_ONLY", str2bool)]:
if original in d:
d[replace] = transform(d[original]) if transform else d[original]
del d[original]
self.rows.append(BlockDevice(d))
self.device_data = dict((dev.name, dev) for dev in self.rows)
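# --- Hedged usage sketch (not part of the original module) ---
# Feeds a small, made-up ``lsblk`` output through LSBlock.parse_content().
# In real use the parser is constructed by the insights engine (tests
# typically build a context, e.g. with insights.tests.context_wrap); here the
# constructor is bypassed with __new__ purely to keep the demo standalone.
if __name__ == "__main__":
    sample = """\
NAME          MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
vda           252:0    0    9G  0 disk
|-vda1        252:1    0  500M  0 part /boot
`-vda2        252:2    0  8.5G  0 part
  |-rhel-root 253:0    0  7.6G  0 lvm  /
""".splitlines()
    lsblk_demo = LSBlock.__new__(LSBlock)
    lsblk_demo.parse_content(sample)
    for row in lsblk_demo.rows:
        print(row)                              # e.g. part:vda1(/boot)
    print(lsblk_demo.rows[3].parent_names)      # ['vda', 'vda2']
    print(lsblk_demo.device_data['vda'].size)   # '9G'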
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import re
import subprocess
import time
import fixtures
from heatclient import exc as heat_exceptions
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from six.moves import urllib
import testscenarios
import testtools
from heat_integrationtests.common import clients
from heat_integrationtests.common import config
from heat_integrationtests.common import exceptions
from heat_integrationtests.common import remote_client
LOG = logging.getLogger(__name__)
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
def call_until_true(duration, sleep_for, func, *args, **kwargs):
"""
Call the given function until it returns True (and return True) or
until the specified duration (in seconds) elapses (and return
False).
    :param duration: The number of seconds for which to attempt a
        successful call of the function.
    :param sleep_for: The number of seconds to sleep after an unsuccessful
        invocation of the function.
    :param func: A zero-argument callable that returns True on success.
"""
now = time.time()
timeout = now + duration
while now < timeout:
if func(*args, **kwargs):
return True
LOG.debug("Sleeping for %d seconds", sleep_for)
time.sleep(sleep_for)
now = time.time()
return False
def rand_name(name=''):
randbits = str(random.randint(1, 0x7fffffff))
if name:
return name + '-' + randbits
else:
return randbits
class HeatIntegrationTest(testscenarios.WithScenarios,
testtools.TestCase):
def setUp(self):
super(HeatIntegrationTest, self).setUp()
self.conf = config.init_conf()
self.assertIsNotNone(self.conf.auth_url,
'No auth_url configured')
self.assertIsNotNone(self.conf.username,
'No username configured')
self.assertIsNotNone(self.conf.password,
'No password configured')
self.manager = clients.ClientManager(self.conf)
self.identity_client = self.manager.identity_client
self.orchestration_client = self.manager.orchestration_client
self.compute_client = self.manager.compute_client
self.network_client = self.manager.network_client
self.volume_client = self.manager.volume_client
self.object_client = self.manager.object_client
self.metering_client = self.manager.metering_client
self.useFixture(fixtures.FakeLogger(format=_LOG_FORMAT))
self.updated_time = {}
def get_remote_client(self, server_or_ip, username, private_key=None):
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
network_name_for_ssh = self.conf.network_for_ssh
ip = server_or_ip.networks[network_name_for_ssh][0]
if private_key is None:
private_key = self.keypair.private_key
linux_client = remote_client.RemoteClient(ip, username,
pkey=private_key,
conf=self.conf)
try:
linux_client.validate_authentication()
except exceptions.SSHTimeout:
LOG.exception('ssh connection to %s failed' % ip)
raise
return linux_client
def check_connectivity(self, check_ip):
def try_connect(ip):
try:
urllib.request.urlopen('http://%s/' % ip)
return True
except IOError:
return False
timeout = self.conf.connectivity_timeout
elapsed_time = 0
while not try_connect(check_ip):
time.sleep(10)
elapsed_time += 10
if elapsed_time > timeout:
raise exceptions.TimeoutException()
def _log_console_output(self, servers=None):
if not servers:
servers = self.compute_client.servers.list()
for server in servers:
LOG.info('Console output for %s', server.id)
LOG.info(server.get_console_output())
def _load_template(self, base_file, file_name, sub_dir=None):
sub_dir = sub_dir or ''
filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
sub_dir, file_name)
with open(filepath) as f:
return f.read()
def create_keypair(self, client=None, name=None):
if client is None:
client = self.compute_client
if name is None:
name = rand_name('heat-keypair')
keypair = client.keypairs.create(name)
self.assertEqual(keypair.name, name)
def delete_keypair():
keypair.delete()
self.addCleanup(delete_keypair)
return keypair
def assign_keypair(self):
if self.conf.keypair_name:
self.keypair = None
self.keypair_name = self.conf.keypair_name
else:
self.keypair = self.create_keypair()
self.keypair_name = self.keypair.id
@classmethod
def _stack_rand_name(cls):
return rand_name(cls.__name__)
def _get_network(self, net_name=None):
if net_name is None:
net_name = self.conf.fixed_network_name
networks = self.network_client.list_networks()
for net in networks['networks']:
if net['name'] == net_name:
return net
@staticmethod
def _stack_output(stack, output_key, validate_errors=True):
"""Return a stack output value for a given key."""
value = None
for o in stack.outputs:
if validate_errors and 'output_error' in o:
# scan for errors in the stack output.
raise ValueError(
'Unexpected output errors in %s : %s' % (
output_key, o['output_error']))
if o['output_key'] == output_key:
value = o['output_value']
return value
def _ping_ip_address(self, ip_address, should_succeed=True):
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
return (proc.returncode == 0) == should_succeed
return call_until_true(
self.conf.build_timeout, 1, ping)
def _wait_for_all_resource_status(self, stack_identifier,
status, failure_pattern='^.*_FAILED$',
success_on_not_found=False):
for res in self.client.resources.list(stack_identifier):
self._wait_for_resource_status(
stack_identifier, res.resource_name,
status, failure_pattern=failure_pattern,
success_on_not_found=success_on_not_found)
def _wait_for_resource_status(self, stack_identifier, resource_name,
status, failure_pattern='^.*_FAILED$',
success_on_not_found=False):
"""Waits for a Resource to reach a given status."""
fail_regexp = re.compile(failure_pattern)
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
res = self.client.resources.get(
stack_identifier, resource_name)
except heat_exceptions.HTTPNotFound:
if success_on_not_found:
return
# ignore this, as the resource may not have
# been created yet
else:
if res.resource_status == status:
return
wait_for_action = status.split('_')[0]
resource_action = res.resource_status.split('_')[0]
if (resource_action == wait_for_action and
fail_regexp.search(res.resource_status)):
raise exceptions.StackResourceBuildErrorException(
resource_name=res.resource_name,
stack_identifier=stack_identifier,
resource_status=res.resource_status,
resource_status_reason=res.resource_status_reason)
time.sleep(build_interval)
message = ('Resource %s failed to reach %s status within '
'the required time (%s s).' %
(resource_name, status, build_timeout))
raise exceptions.TimeoutException(message)
def _verify_status(self, stack, stack_identifier, status, fail_regexp):
if stack.stack_status == status:
# Handle UPDATE_COMPLETE case: Make sure we don't
# wait for a stale UPDATE_COMPLETE status.
if status == 'UPDATE_COMPLETE':
if self.updated_time.get(
stack_identifier) != stack.updated_time:
self.updated_time[stack_identifier] = stack.updated_time
return True
else:
return True
wait_for_action = status.split('_')[0]
if (stack.action == wait_for_action and
fail_regexp.search(stack.stack_status)):
# Handle UPDATE_FAILED case.
if status == 'UPDATE_FAILED':
if self.updated_time.get(
stack_identifier) != stack.updated_time:
self.updated_time[stack_identifier] = stack.updated_time
raise exceptions.StackBuildErrorException(
stack_identifier=stack_identifier,
stack_status=stack.stack_status,
stack_status_reason=stack.stack_status_reason)
else:
raise exceptions.StackBuildErrorException(
stack_identifier=stack_identifier,
stack_status=stack.stack_status,
stack_status_reason=stack.stack_status_reason)
def _wait_for_stack_status(self, stack_identifier, status,
failure_pattern='^.*_FAILED$',
success_on_not_found=False):
"""
Waits for a Stack to reach a given status.
        Note this compares the full $action_$status, e.g.
        CREATE_COMPLETE, not just COMPLETE, which is exposed
        via the status property of Stack in heatclient.
"""
fail_regexp = re.compile(failure_pattern)
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
stack = self.client.stacks.get(stack_identifier)
except heat_exceptions.HTTPNotFound:
if success_on_not_found:
return
# ignore this, as the resource may not have
# been created yet
else:
if self._verify_status(stack, stack_identifier, status,
fail_regexp):
return
time.sleep(build_interval)
message = ('Stack %s failed to reach %s status within '
'the required time (%s s).' %
(stack_identifier, status, build_timeout))
raise exceptions.TimeoutException(message)
def _stack_delete(self, stack_identifier):
try:
self.client.stacks.delete(stack_identifier)
except heat_exceptions.HTTPNotFound:
pass
self._wait_for_stack_status(
stack_identifier, 'DELETE_COMPLETE',
success_on_not_found=True)
def update_stack(self, stack_identifier, template, environment=None,
files=None, parameters=None, tags=None,
expected_status='UPDATE_COMPLETE',
disable_rollback=True):
env = environment or {}
env_files = files or {}
parameters = parameters or {}
stack_name = stack_identifier.split('/')[0]
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
self.client.stacks.update(
stack_id=stack_identifier,
stack_name=stack_name,
template=template,
files=env_files,
disable_rollback=disable_rollback,
parameters=parameters,
environment=env,
tags=tags
)
except heat_exceptions.HTTPConflict as ex:
# FIXME(sirushtim): Wait a little for the stack lock to be
# released and hopefully, the stack should be updatable again.
if ex.error['error']['type'] != 'ActionInProgress':
raise ex
time.sleep(build_interval)
else:
break
kwargs = {'stack_identifier': stack_identifier,
'status': expected_status}
if expected_status in ['ROLLBACK_COMPLETE']:
# To trigger rollback you would intentionally fail the stack
# Hence check for rollback failures
kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
self._wait_for_stack_status(**kwargs)
def assert_resource_is_a_stack(self, stack_identifier, res_name,
wait=False):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
time.sleep(build_interval)
try:
nested_identifier = self._get_nested_identifier(
stack_identifier, res_name)
except Exception:
# We may have to wait, if the create is in-progress
if wait:
time.sleep(build_interval)
else:
raise
else:
return nested_identifier
def _get_nested_identifier(self, stack_identifier, res_name):
rsrc = self.client.resources.get(stack_identifier, res_name)
nested_link = [l for l in rsrc.links if l['rel'] == 'nested']
nested_href = nested_link[0]['href']
nested_id = nested_href.split('/')[-1]
nested_identifier = '/'.join(nested_href.split('/')[-2:])
self.assertEqual(rsrc.physical_resource_id, nested_id)
nested_stack = self.client.stacks.get(nested_id)
nested_identifier2 = '%s/%s' % (nested_stack.stack_name,
nested_stack.id)
self.assertEqual(nested_identifier, nested_identifier2)
parent_id = stack_identifier.split("/")[-1]
self.assertEqual(parent_id, nested_stack.parent)
return nested_identifier
def list_resources(self, stack_identifier):
resources = self.client.resources.list(stack_identifier)
return dict((r.resource_name, r.resource_type) for r in resources)
def stack_create(self, stack_name=None, template=None, files=None,
parameters=None, environment=None, tags=None,
expected_status='CREATE_COMPLETE',
disable_rollback=True, enable_cleanup=True):
name = stack_name or self._stack_rand_name()
templ = template or self.template
templ_files = files or {}
params = parameters or {}
env = environment or {}
self.client.stacks.create(
stack_name=name,
template=templ,
files=templ_files,
disable_rollback=disable_rollback,
parameters=params,
environment=env,
tags=tags
)
if expected_status not in ['ROLLBACK_COMPLETE'] and enable_cleanup:
self.addCleanup(self._stack_delete, name)
stack = self.client.stacks.get(name)
stack_identifier = '%s/%s' % (name, stack.id)
kwargs = {'stack_identifier': stack_identifier,
'status': expected_status}
if expected_status:
if expected_status in ['ROLLBACK_COMPLETE']:
# To trigger rollback you would intentionally fail the stack
# Hence check for rollback failures
kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
self._wait_for_stack_status(**kwargs)
return stack_identifier
def stack_adopt(self, stack_name=None, files=None,
parameters=None, environment=None, adopt_data=None,
wait_for_status='ADOPT_COMPLETE'):
if (self.conf.skip_test_stack_action_list and
'ADOPT' in self.conf.skip_test_stack_action_list):
self.skipTest('Testing Stack adopt disabled in conf, skipping')
name = stack_name or self._stack_rand_name()
templ_files = files or {}
params = parameters or {}
env = environment or {}
self.client.stacks.create(
stack_name=name,
files=templ_files,
disable_rollback=True,
parameters=params,
environment=env,
adopt_stack_data=adopt_data,
)
self.addCleanup(self._stack_delete, name)
stack = self.client.stacks.get(name)
stack_identifier = '%s/%s' % (name, stack.id)
self._wait_for_stack_status(stack_identifier, wait_for_status)
return stack_identifier
def stack_abandon(self, stack_id):
if (self.conf.skip_test_stack_action_list and
'ABANDON' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_id)
self.skipTest('Testing Stack abandon disabled in conf, skipping')
info = self.client.stacks.abandon(stack_id=stack_id)
return info
def stack_suspend(self, stack_identifier):
if (self.conf.skip_test_stack_action_list and
'SUSPEND' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_identifier)
self.skipTest('Testing Stack suspend disabled in conf, skipping')
stack_name = stack_identifier.split('/')[0]
self.client.actions.suspend(stack_name)
# improve debugging by first checking the resource's state.
self._wait_for_all_resource_status(stack_identifier,
'SUSPEND_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
def stack_resume(self, stack_identifier):
if (self.conf.skip_test_stack_action_list and
'RESUME' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_identifier)
self.skipTest('Testing Stack resume disabled in conf, skipping')
stack_name = stack_identifier.split('/')[0]
self.client.actions.resume(stack_name)
# improve debugging by first checking the resource's state.
self._wait_for_all_resource_status(stack_identifier,
'RESUME_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'RESUME_COMPLETE')
def wait_for_event_with_reason(self, stack_identifier, reason,
rsrc_name=None, num_expected=1):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
rsrc_events = self.client.events.list(stack_identifier,
resource_name=rsrc_name)
except heat_exceptions.HTTPNotFound:
LOG.debug("No events yet found for %s" % rsrc_name)
else:
matched = [e for e in rsrc_events
if e.resource_status_reason == reason]
if len(matched) == num_expected:
return matched
time.sleep(build_interval)
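# --- Hedged usage sketch (not part of the original module) ---
# Illustrates how a concrete test built on HeatIntegrationTest typically
# drives stack_create()/update_stack().  The template below is a made-up
# minimal example, and wiring self.client to the orchestration client in
# setUp follows the convention the base class assumes.
class ExampleStackTest(HeatIntegrationTest):
    template = '''
heat_template_version: 2014-10-16
resources:
  hello:
    type: OS::Heat::RandomString
'''
    def setUp(self):
        super(ExampleStackTest, self).setUp()
        self.client = self.orchestration_client
    def test_create_and_update(self):
        stack_identifier = self.stack_create(template=self.template)
        self.assertIn('hello', self.list_resources(stack_identifier))
        self.update_stack(stack_identifier, template=self.template)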
|
|
# Copyright 2006-2007 Lukas Lalinsky
# Copyright 2005-2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id: asf.py 4153 2007-08-05 07:07:49Z piman $
"""Read and write ASF (Window Media Audio) files."""
__all__ = ["ASF", "Open"]
import struct
from mutagen import FileType, Metadata
from mutagen._util import insert_bytes, delete_bytes, DictMixin
class error(IOError): pass
class ASFError(error): pass
class ASFHeaderError(error): pass
class ASFInfo(object):
"""ASF stream information."""
def __init__(self):
self.length = 0.0
self.sample_rate = 0
self.bitrate = 0
self.channels = 0
def pprint(self):
s = "Windows Media Audio %d bps, %s Hz, %d channels, %.2f seconds" % (
self.bitrate, self.sample_rate, self.channels, self.length)
return s
class ASFTags(list, DictMixin, Metadata):
"""Dictionary containing ASF attributes."""
def pprint(self):
return "\n".join(["%s=%s" % (k, v) for k, v in self])
def __getitem__(self, key):
"""A list of values for the key.
This is a copy, so comment['title'].append('a title') will not
work.
"""
values = [value for (k, value) in self if k == key]
if not values: raise KeyError, key
else: return values
def __delitem__(self, key):
"""Delete all values associated with the key."""
to_delete = filter(lambda x: x[0] == key, self)
if not to_delete: raise KeyError, key
else: map(self.remove, to_delete)
def __contains__(self, key):
"""Return true if the key has any values."""
for k, value in self:
if k == key: return True
else: return False
def __setitem__(self, key, values):
"""Set a key's value or values.
Setting a value overwrites all old ones. The value may be a
list of Unicode or UTF-8 strings, or a single Unicode or UTF-8
string.
"""
if not isinstance(values, list):
values = [values]
try: del(self[key])
except KeyError: pass
for value in values:
if key in _standard_attribute_names:
value = unicode(value)
elif not isinstance(value, ASFBaseAttribute):
if isinstance(value, basestring):
value = ASFUnicodeAttribute(value)
elif isinstance(value, bool):
value = ASFBoolAttribute(value)
elif isinstance(value, int):
value = ASFDWordAttribute(value)
elif isinstance(value, long):
value = ASFQWordAttribute(value)
self.append((key, value))
def keys(self):
"""Return all keys in the comment."""
return self and set(zip(*self)[0])
def as_dict(self):
"""Return a copy of the comment data in a real dict."""
d = {}
for key, value in self:
d.setdefault(key, []).append(value)
return d
class ASFBaseAttribute(object):
"""Generic attribute."""
TYPE = None
def __init__(self, value=None, data=None, language=None,
stream=None, **kwargs):
self.language = language
self.stream = stream
if data:
self.value = self.parse(data, **kwargs)
else:
self.value = value
def __repr__(self):
name = "%s(%r" % (type(self).__name__, self.value)
if self.language:
name += ", language=%d" % self.language
if self.stream:
name += ", stream=%d" % self.stream
name += ")"
return name
def render(self, name):
name = name.encode("utf-16-le") + "\x00\x00"
data = self._render()
return (struct.pack("<H", len(name)) + name +
struct.pack("<HH", self.TYPE, len(data)) + data)
def render_m(self, name):
name = name.encode("utf-16-le") + "\x00\x00"
if self.TYPE == 2:
data = self._render(dword=False)
else:
data = self._render()
return (struct.pack("<HHHHI", 0, self.stream or 0, len(name),
self.TYPE, len(data)) + name + data)
def render_ml(self, name):
name = name.encode("utf-16-le") + "\x00\x00"
if self.TYPE == 2:
data = self._render(dword=False)
else:
data = self._render()
return (struct.pack("<HHHHI", self.language or 0, self.stream or 0,
len(name), self.TYPE, len(data)) + name + data)
class ASFUnicodeAttribute(ASFBaseAttribute):
"""Unicode string attribute."""
TYPE = 0x0000
def parse(self, data):
return data.decode("utf-16").strip("\x00")
def _render(self):
return self.value.encode("utf-16-le") + "\x00\x00"
def __str__(self):
return self.value
def __cmp__(self, other):
return cmp(unicode(self), other)
class ASFByteArrayAttribute(ASFBaseAttribute):
"""Byte array attribute."""
TYPE = 0x0001
def parse(self, data):
return data
def _render(self):
return self.value
def __str__(self):
return "[binary data (%s bytes)]" % len(self.value)
def __cmp__(self, other):
return cmp(str(self), other)
class ASFBoolAttribute(ASFBaseAttribute):
"""Bool attribute."""
TYPE = 0x0002
def parse(self, data, dword=True):
if dword:
return struct.unpack("<I", data)[0] == 1
else:
return struct.unpack("<H", data)[0] == 1
def _render(self, dword=True):
if dword:
return struct.pack("<I", int(self.value))
else:
return struct.pack("<H", int(self.value))
def __bool__(self):
return self.value
def __str__(self):
return str(self.value)
def __cmp__(self, other):
return cmp(bool(self), other)
class ASFDWordAttribute(ASFBaseAttribute):
"""DWORD attribute."""
TYPE = 0x0003
def parse(self, data):
return struct.unpack("<L", data)[0]
def _render(self):
return struct.pack("<L", self.value)
def __int__(self):
return self.value
def __str__(self):
return str(self.value)
def __cmp__(self, other):
return cmp(int(self), other)
class ASFQWordAttribute(ASFBaseAttribute):
"""QWORD attribute."""
TYPE = 0x0004
def parse(self, data):
return struct.unpack("<Q", data)[0]
def _render(self):
return struct.pack("<Q", self.value)
def __int__(self):
return self.value
def __str__(self):
return str(self.value)
def __cmp__(self, other):
return cmp(int(self), other)
class ASFWordAttribute(ASFBaseAttribute):
"""WORD attribute."""
TYPE = 0x0005
def parse(self, data):
return struct.unpack("<H", data)[0]
def _render(self):
return struct.pack("<H", self.value)
def __int__(self):
return self.value
def __str__(self):
return str(self.value)
def __cmp__(self, other):
return cmp(int(self), other)
class ASFGUIDAttribute(ASFBaseAttribute):
"""GUID attribute."""
TYPE = 0x0006
def parse(self, data):
return data
def _render(self):
return self.value
def __str__(self):
return self.value
def __cmp__(self, other):
return cmp(str(self), other)
UNICODE = ASFUnicodeAttribute.TYPE
BYTEARRAY = ASFByteArrayAttribute.TYPE
BOOL = ASFBoolAttribute.TYPE
DWORD = ASFDWordAttribute.TYPE
QWORD = ASFQWordAttribute.TYPE
WORD = ASFWordAttribute.TYPE
GUID = ASFGUIDAttribute.TYPE
def ASFValue(value, kind, **kwargs):
for t, c in _attribute_types.items():
if kind == t:
return c(value=value, **kwargs)
raise ValueError("Unknown value type")
_attribute_types = {
ASFUnicodeAttribute.TYPE: ASFUnicodeAttribute,
ASFByteArrayAttribute.TYPE: ASFByteArrayAttribute,
ASFBoolAttribute.TYPE: ASFBoolAttribute,
ASFDWordAttribute.TYPE: ASFDWordAttribute,
ASFQWordAttribute.TYPE: ASFQWordAttribute,
ASFWordAttribute.TYPE: ASFWordAttribute,
ASFGUIDAttribute.TYPE: ASFGUIDAttribute,
}
_standard_attribute_names = [
"Title",
"Author",
"Copyright",
"Description",
"Rating"
]
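# --- Hedged usage sketch (not part of the original module) ---
# Shows how ASFValue() wraps plain Python values in the typed attribute
# classes above; the attribute names used here are only illustrative.
def _example_values():
    return [
        ("WM/AlbumArtist", ASFValue(u"Some Artist", UNICODE)),
        ("WM/TrackNumber", ASFValue(7, DWORD)),
        ("IsVBR", ASFValue(True, BOOL)),
    ]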
class BaseObject(object):
"""Base ASF object."""
GUID = None
def parse(self, asf, data, fileobj, size):
self.data = data
def render(self, asf):
data = self.GUID + struct.pack("<Q", len(self.data) + 24) + self.data
size = len(data)
return data
class UnknownObject(BaseObject):
"""Unknown ASF object."""
def __init__(self, guid):
self.GUID = guid
class HeaderObject(object):
"""ASF header."""
GUID = "\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C"
class ContentDescriptionObject(BaseObject):
"""Content description."""
GUID = "\x33\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C"
def parse(self, asf, data, fileobj, size):
super(ContentDescriptionObject, self).parse(asf, data, fileobj, size)
asf.content_description_obj = self
lengths = struct.unpack("<HHHHH", data[:10])
texts = []
pos = 10
for length in lengths:
end = pos + length
texts.append(data[pos:end].decode("utf-16").strip("\x00"))
pos = end
(asf.tags["Title"], asf.tags["Author"], asf.tags["Copyright"],
asf.tags["Description"], asf.tags["Rating"]) = texts
def render(self, asf):
def render_text(name):
value = asf.tags.get(name, [])
if value and value[0]:
return value[0].encode("utf-16-le") + "\x00\x00"
else:
return ""
texts = map(render_text, _standard_attribute_names)
data = struct.pack("<HHHHH", *map(str.__len__, texts)) + "".join(texts)
return self.GUID + struct.pack("<Q", 24 + len(data)) + data
class ExtendedContentDescriptionObject(BaseObject):
"""Extended content description."""
GUID = "\x40\xA4\xD0\xD2\x07\xE3\xD2\x11\x97\xF0\x00\xA0\xC9\x5E\xA8\x50"
def parse(self, asf, data, fileobj, size):
super(ExtendedContentDescriptionObject, self).parse(asf, data, fileobj, size)
asf.extended_content_description_obj = self
num_attributes, = struct.unpack("<H", data[0:2])
pos = 2
for i in range(num_attributes):
name_length, = struct.unpack("<H", data[pos:pos+2])
pos += 2
name = data[pos:pos+name_length].decode("utf-16").strip("\x00")
pos += name_length
value_type, value_length = struct.unpack("<HH", data[pos:pos+4])
pos += 4
value = data[pos:pos+value_length]
pos += value_length
attr = _attribute_types[value_type](data=value)
asf.tags.append((name, attr))
def render(self, asf):
attrs = asf.to_extended_content_description.items()
data = "".join([attr.render(name) for (name, attr) in attrs])
data = struct.pack("<QH", 26 + len(data), len(attrs)) + data
return self.GUID + data
class FilePropertiesObject(BaseObject):
"""File properties."""
GUID = "\xA1\xDC\xAB\x8C\x47\xA9\xCF\x11\x8E\xE4\x00\xC0\x0C\x20\x53\x65"
def parse(self, asf, data, fileobj, size):
super(FilePropertiesObject, self).parse(asf, data, fileobj, size)
length, _, preroll = struct.unpack("<QQQ", data[40:64])
asf.info.length = length / 10000000.0 - preroll / 1000.0
class StreamPropertiesObject(BaseObject):
"""Stream properties."""
GUID = "\x91\x07\xDC\xB7\xB7\xA9\xCF\x11\x8E\xE6\x00\xC0\x0C\x20\x53\x65"
def parse(self, asf, data, fileobj, size):
super(StreamPropertiesObject, self).parse(asf, data, fileobj, size)
channels, sample_rate, bitrate = struct.unpack("<HII", data[56:66])
asf.info.channels = channels
asf.info.sample_rate = sample_rate
asf.info.bitrate = bitrate * 8
class HeaderExtensionObject(BaseObject):
"""Header extension."""
GUID = "\xb5\x03\xbf_.\xa9\xcf\x11\x8e\xe3\x00\xc0\x0c Se"
def parse(self, asf, data, fileobj, size):
super(HeaderExtensionObject, self).parse(asf, data, fileobj, size)
asf.header_extension_obj = self
datasize, = struct.unpack("<I", data[18:22])
datapos = 0
self.objects = []
while datapos < datasize:
guid, size = struct.unpack("<16sQ", data[22+datapos:22+datapos+24])
if guid in _object_types:
obj = _object_types[guid]()
else:
obj = UnknownObject(guid)
obj.parse(asf, data[22+datapos+24:22+datapos+size], fileobj, size)
self.objects.append(obj)
datapos += size
def render(self, asf):
data = "".join([obj.render(asf) for obj in self.objects])
return (self.GUID + struct.pack("<Q", 24 + 16 + 6 + len(data)) +
"\x11\xD2\xD3\xAB\xBA\xA9\xcf\x11" +
"\x8E\xE6\x00\xC0\x0C\x20\x53\x65" +
"\x06\x00" + struct.pack("<I", len(data)) + data)
class MetadataObject(BaseObject):
"""Metadata description."""
GUID = "\xea\xcb\xf8\xc5\xaf[wH\x84g\xaa\x8cD\xfaL\xca"
def parse(self, asf, data, fileobj, size):
super(MetadataObject, self).parse(asf, data, fileobj, size)
asf.metadata_obj = self
num_attributes, = struct.unpack("<H", data[0:2])
pos = 2
for i in range(num_attributes):
(reserved, stream, name_length, value_type,
value_length) = struct.unpack("<HHHHI", data[pos:pos+12])
pos += 12
name = data[pos:pos+name_length].decode("utf-16").strip("\x00")
pos += name_length
value = data[pos:pos+value_length]
pos += value_length
args = {'data': value, 'stream': stream}
if value_type == 2:
args['dword'] = False
attr = _attribute_types[value_type](**args)
asf.tags.append((name, attr))
def render(self, asf):
attrs = asf.to_metadata.items()
data = "".join([attr.render_m(name) for (name, attr) in attrs])
return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
data)
class MetadataLibraryObject(BaseObject):
"""Metadata library description."""
GUID = "\x94\x1c#D\x98\x94\xd1I\xa1A\x1d\x13NEpT"
def parse(self, asf, data, fileobj, size):
super(MetadataLibraryObject, self).parse(asf, data, fileobj, size)
asf.metadata_library_obj = self
num_attributes, = struct.unpack("<H", data[0:2])
pos = 2
for i in range(num_attributes):
(language, stream, name_length, value_type,
value_length) = struct.unpack("<HHHHI", data[pos:pos+12])
pos += 12
name = data[pos:pos+name_length].decode("utf-16").strip("\x00")
pos += name_length
value = data[pos:pos+value_length]
pos += value_length
args = {'data': value, 'language': language, 'stream': stream}
if value_type == 2:
args['dword'] = False
attr = _attribute_types[value_type](**args)
asf.tags.append((name, attr))
def render(self, asf):
attrs = asf.to_metadata_library
data = "".join([attr.render_ml(name) for (name, attr) in attrs])
return (self.GUID + struct.pack("<QH", 26 + len(data), len(attrs)) +
data)
_object_types = {
ExtendedContentDescriptionObject.GUID: ExtendedContentDescriptionObject,
ContentDescriptionObject.GUID: ContentDescriptionObject,
FilePropertiesObject.GUID: FilePropertiesObject,
StreamPropertiesObject.GUID: StreamPropertiesObject,
HeaderExtensionObject.GUID: HeaderExtensionObject,
MetadataLibraryObject.GUID: MetadataLibraryObject,
MetadataObject.GUID: MetadataObject,
}
class ASF(FileType):
"""An ASF file, probably containing WMA or WMV."""
_mimes = ["audio/x-ms-wma", "audio/x-ms-wmv", "video/x-ms-asf",
"audio/x-wma", "video/x-wmv"]
def load(self, filename):
self.filename = filename
fileobj = file(filename, "rb")
try:
self.size = 0
self.size1 = 0
self.size2 = 0
self.offset1 = 0
self.offset2 = 0
self.num_objects = 0
self.info = ASFInfo()
self.tags = ASFTags()
self.__read_file(fileobj)
finally:
fileobj.close()
def save(self):
# Move attributes to the right objects
self.to_extended_content_description = {}
self.to_metadata = {}
self.to_metadata_library = []
for name, value in self.tags:
if name in _standard_attribute_names:
continue
if (value.language is None and value.stream is None and
name not in self.to_extended_content_description):
self.to_extended_content_description[name] = value
elif (value.language is None and value.stream is not None and
name not in self.to_metadata):
self.to_metadata[name] = value
else:
self.to_metadata_library.append((name, value))
# Add missing objects
if not self.content_description_obj:
self.content_description_obj = \
ContentDescriptionObject()
self.objects.append(self.content_description_obj)
if not self.extended_content_description_obj:
self.extended_content_description_obj = \
ExtendedContentDescriptionObject()
self.objects.append(self.extended_content_description_obj)
if not self.header_extension_obj:
self.header_extension_obj = \
HeaderExtensionObject()
self.objects.append(self.header_extension_obj)
if not self.metadata_obj:
self.metadata_obj = \
MetadataObject()
self.header_extension_obj.objects.append(self.metadata_obj)
if not self.metadata_library_obj:
self.metadata_library_obj = \
MetadataLibraryObject()
self.header_extension_obj.objects.append(self.metadata_library_obj)
# Render the header
data = "".join([obj.render(self) for obj in self.objects])
data = (HeaderObject.GUID +
struct.pack("<QL", len(data) + 30, len(self.objects)) +
"\x01\x02" + data)
fileobj = file(self.filename, "rb+")
try:
size = len(data)
if size > self.size:
insert_bytes(fileobj, size - self.size, self.size)
if size < self.size:
delete_bytes(fileobj, self.size - size, 0)
fileobj.seek(0)
fileobj.write(data)
finally:
fileobj.close()
def __read_file(self, fileobj):
header = fileobj.read(30)
if len(header) != 30 or header[:16] != HeaderObject.GUID:
raise ASFHeaderError, "Not an ASF file."
self.extended_content_description_obj = None
self.content_description_obj = None
self.header_extension_obj = None
self.metadata_obj = None
self.metadata_library_obj = None
self.size, self.num_objects = struct.unpack("<QL", header[16:28])
self.objects = []
for i in range(self.num_objects):
self.__read_object(fileobj)
def __read_object(self, fileobj):
guid, size = struct.unpack("<16sQ", fileobj.read(24))
if guid in _object_types:
obj = _object_types[guid]()
else:
obj = UnknownObject(guid)
data = fileobj.read(size - 24)
obj.parse(self, data, fileobj, size)
self.objects.append(obj)
def score(filename, fileobj, header):
return header.startswith(HeaderObject.GUID) * 2
score = staticmethod(score)
Open = ASF
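# Illustrative usage sketch (not part of the module above). The filename and
# tag name are placeholders; it is assumed that the FileType base class
# forwards the filename to load() and that ASFTags exposes the dict-like
# interface used elsewhere in this codebase.
def _asf_usage_example():
    audio = ASF("example.wma")                # parses the ASF header objects
    seconds = audio.info.length               # filled in by FilePropertiesObject
    audio["WM/AlbumTitle"] = [u"Some Album"]  # stored as a (name, attr) pair
    audio.save()                              # re-renders and rewrites the header
    return seconds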
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class systemglobal_authenticationlocalpolicy_binding(base_resource) :
""" Binding class showing the authenticationlocalpolicy that can be bound to systemglobal.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._builtin = []
self.___count = 0
@property
def priority(self) :
"""The priority of the command policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""The priority of the command policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def builtin(self) :
"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
"""
try :
return self._builtin
except Exception as e:
raise e
@builtin.setter
def builtin(self, builtin) :
"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL
"""
try :
self._builtin = builtin
except Exception as e:
raise e
@property
def policyname(self) :
"""The name of the command policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""The name of the command policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(systemglobal_authenticationlocalpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.systemglobal_authenticationlocalpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = systemglobal_authenticationlocalpolicy_binding()
updateresource.policyname = resource.policyname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [systemglobal_authenticationlocalpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = systemglobal_authenticationlocalpolicy_binding()
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [systemglobal_authenticationlocalpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
""" Use this API to fetch a systemglobal_authenticationlocalpolicy_binding resources.
"""
try :
obj = systemglobal_authenticationlocalpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
""" Use this API to fetch filtered set of systemglobal_authenticationlocalpolicy_binding resources.
Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = systemglobal_authenticationlocalpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
""" Use this API to count systemglobal_authenticationlocalpolicy_binding resources configued on NetScaler.
"""
try :
obj = systemglobal_authenticationlocalpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
""" Use this API to count the filtered set of systemglobal_authenticationlocalpolicy_binding resources.
Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = systemglobal_authenticationlocalpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class systemglobal_authenticationlocalpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.systemglobal_authenticationlocalpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.systemglobal_authenticationlocalpolicy_binding = [systemglobal_authenticationlocalpolicy_binding() for _ in range(length)]
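# Illustrative sketch only (not part of the generated NITRO SDK sources above):
# binding a local authentication policy to systemglobal via the add() API.
# `client` is assumed to be an already logged-in nitro_service session, and the
# policy name is a placeholder.
def _example_bind_local_policy_globally(client) :
    binding = systemglobal_authenticationlocalpolicy_binding()
    binding.policyname = "local_auth_pol"
    return systemglobal_authenticationlocalpolicy_binding.add(client, binding)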
|
|
"""Contains the ShotGroup base class."""
from collections import deque
from mpf.core.device_monitor import DeviceMonitor
from mpf.core.events import event_handler
from mpf.core.mode import Mode
from mpf.core.mode_device import ModeDevice
from mpf.core.player import Player
@DeviceMonitor("common_state", "rotation_enabled")
class ShotGroup(ModeDevice):
"""Represents a group of shots in a pinball machine by grouping together multiple `Shot` class devices.
This is used so you can get
"group-level" functionality, like shot rotation, shot group completion,
etc. This would be used for a group of rollover lanes, a bank of standups,
etc.
"""
config_section = 'shot_groups'
collection = 'shot_groups'
class_label = 'shot_group'
__slots__ = ["rotation_enabled", "profile", "rotation_pattern", "common_state"]
def __init__(self, machine, name):
"""Initialise shot group."""
super().__init__(machine, name)
self.rotation_enabled = None
self.profile = None
self.rotation_pattern = None
self.common_state = None
def add_control_events_in_mode(self, mode) -> None:
"""Remove enable here."""
def device_loaded_in_mode(self, mode: Mode, player: Player):
"""Add device in mode."""
super().device_loaded_in_mode(mode, player)
self._check_for_complete()
self.profile = self.config['shots'][0].profile
self.rotation_pattern = deque(self.profile.config['rotation_pattern'])
self.rotation_enabled = not self.config['enable_rotation_events']
for shot in self.config['shots']:
self.machine.events.add_handler("{}_hit".format(shot.name), self._hit)
self.machine.events.add_handler("player_shot_{}".format(shot.name), self._check_for_complete)
def device_removed_from_mode(self, mode):
"""Disable device when mode stops."""
super().device_removed_from_mode(mode)
self.machine.events.remove_handler(self._hit)
self.machine.events.remove_handler(self._check_for_complete)
def get_common_state(self):
"""Return common state if all shots in this group are in the same state.
Will return None otherwise.
"""
state = self.config['shots'][0].state_name
for shot in self.config['shots']:
if state != shot.state_name:
# shots do not have a common state
return None
return state
def _check_for_complete(self, **kwargs):
"""Check if all shots in this group are in the same state."""
del kwargs
state = self.get_common_state()
if state == self.common_state:
return
self.common_state = state
if not state:
# shots do not have a common state
return
# if we reached this point we got a common state
self.debug_log(
"Shot group is complete with state: %s", state)
self.machine.events.post('{}_complete'.format(self.name), state=state)
'''event: (name)_complete
desc: All the member shots in the shot group called (name)
are in the same state.
args:
state: name of the common state of all shots.
'''
self.machine.events.post('{}_{}_complete'.format(self.name, state))
'''event: (name)_(state)_complete
desc: All the member shots in the shot group called (name)
are in the same state named (state).
'''
@event_handler(2)
def event_enable(self, **kwargs):
"""Handle enable control event."""
del kwargs
self.enable()
def enable(self):
"""Enable all member shots."""
for shot in self.config['shots']:
shot.enable()
@event_handler(3)
def event_disable(self, **kwargs):
"""Handle disable control event."""
del kwargs
self.disable()
def disable(self):
"""Disable all member shots."""
for shot in self.config['shots']:
shot.disable()
@event_handler(1)
def event_reset(self, **kwargs):
"""Handle reset control event."""
del kwargs
self.reset()
def reset(self):
"""Reset all member shots."""
for shot in self.config['shots']:
shot.reset()
@event_handler(4)
def event_restart(self, **kwargs):
"""Handle restart control event."""
del kwargs
self.restart()
def restart(self):
"""Restart all member shots."""
for shot in self.config['shots']:
shot.restart()
def _hit(self, advancing, **kwargs):
"""One of the member shots in this shot group was hit.
Args:
----
kwarg: {
profile: the current profile of the member shot that was hit
state: the current state of the member shot that was hit
advancing: boolean of whether the state is advancing
}
"""
del advancing
self.machine.events.post(self.name + '_hit')
'''event: (name)_hit
desc: A member shot in the shot group called (name)
has been hit.
'''
self.machine.events.post("{}_{}_hit".format(self.name, kwargs['state']))
'''event: (name)_(state)_hit
desc: A member shot with state (state) in the shot group (name)
has been hit.
'''
@event_handler(9)
def event_enable_rotation(self, **kwargs):
"""Handle enable_rotation control event."""
del kwargs
self.enable_rotation()
def enable_rotation(self):
"""Enable shot rotation.
If disabled, rotation events do not actually rotate the shots.
"""
self.debug_log('Enabling rotation')
self.rotation_enabled = True
@event_handler(2)
def event_disable_rotation(self, **kwargs):
"""Handle disable rotation control event."""
del kwargs
self.disable_rotation()
def disable_rotation(self):
"""Disable shot rotation.
If disabled, rotation events do not actually rotate the shots.
"""
self.debug_log('Disabling rotation')
self.rotation_enabled = False
@event_handler(4)
def event_rotate(self, direction=None, **kwargs):
"""Handle rotate control event."""
del kwargs
self.rotate(direction)
def rotate(self, direction=None):
"""Rotate (or "shift") the state of all the shots in this group.
This is used for things like lane change, where hitting the flipper
button shifts all the states of the shots in the group to the left or
right.
This method actually transfers the current state of each shot profile
to the left or the right, and the shot on the end rolls over to the
target on the other end.
Args:
----
direction: String that specifies whether the rotation direction is
to the left or right. Values are 'right' or 'left'. Default of
None will cause the shot group to rotate in the direction as
specified by the rotation_pattern.
Note that both this shot group and rotation events for this
shot group must be enabled for the rotation events to work.
"""
if not self.rotation_enabled:
self.debug_log("Received rotation request. "
"Rotation Enabled: %s. Will NOT rotate",
self.rotation_enabled)
return
# shot_state_list is deque of tuples (state num, show step num)
shot_state_list = deque()
shots_to_rotate = []
for shot in self.config['shots']:
if shot.can_rotate:
shots_to_rotate.append(shot)
shot_state_list.append(shot.state)
# figure out which direction we're going to rotate
if not direction:
direction = self.rotation_pattern[0]
self.rotation_pattern.rotate(-1)
self.debug_log("Since no direction was specified, pulling from"
" rotation pattern: '%s'", direction)
# rotate that list
if direction.lower() in ('right', 'r'):
shot_state_list.rotate(1)
else:
shot_state_list.rotate(-1)
# step through all our shots and update their states
for i, shot in enumerate(shots_to_rotate):
shot.jump(state=shot_state_list[i], force=True)
@event_handler(8)
def event_rotate_right(self, **kwargs):
"""Handle rotate right control event."""
del kwargs
self.rotate_right()
def rotate_right(self):
"""Rotate the state of the shots to the right.
This method is the same as calling rotate('right')
"""
self.rotate(direction='right')
@event_handler(7)
def event_rotate_left(self, **kwargs):
"""Handle rotate left control event."""
del kwargs
self.rotate_left()
def rotate_left(self):
"""Rotate the state of the shots to the left.
This method is the same as calling rotate('left')
"""
self.rotate(direction='left')
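# Illustrative sketch (not part of MPF): the lane-change behaviour implemented
# by ShotGroup.rotate() reduces to collections.deque.rotate() applied to a
# snapshot of the member shots' states, as the toy example below shows. The
# state names are arbitrary placeholders.
def _rotation_example():
    states = deque(["lit", "unlit", "unlit"])
    states.rotate(1)    # 'right': the lit state moves one lane to the right
    assert list(states) == ["unlit", "lit", "unlit"]
    states.rotate(-1)   # 'left': back to the original arrangement
    assert list(states) == ["lit", "unlit", "unlit"]
    return states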
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import collections
import numbers
import string
import six
import re
from six.moves import filter, map, zip
from functools import total_ordering
from monty.fractions import gcd, gcd_float
from pymatgen.core.periodic_table import get_el_sp, Element
from pymatgen.util.string_utils import formula_double_format
from monty.json import MSONable
from pymatgen.core.units import unitized
"""
This module implements a Composition class to represent compositions,
and a ChemicalPotential class to represent potentials.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "Nov 10, 2012"
@total_ordering
class Composition(collections.Hashable, collections.Mapping, MSONable):
"""
Represents a Composition, which is essentially a {element:amount} mapping
type. Composition is written to be immutable and hashable,
unlike a standard Python dict.
Note that the key can be either an Element or a Specie. Elements and Specie
are treated differently. i.e., a Fe2+ is not the same as a Fe3+ Specie and
would be put in separate keys. This differentiation is deliberate to
support using Composition to determine the fraction of a particular Specie.
Works almost completely like a standard python dictionary, except that
__getitem__ is overridden to return 0 when an element is not found.
(somewhat like a defaultdict, except it is immutable).
Also adds more convenience methods relevant to compositions, e.g.,
get_fraction.
It should also be noted that much Composition-related functionality takes
in a standard string as a convenient input. For example,
even though the internal representation of a Fe2O3 composition is
{Element("Fe"): 2, Element("O"): 3}, you can obtain the amount of Fe
simply by comp["Fe"] instead of the more verbose comp[Element("Fe")].
>>> comp = Composition("LiFePO4")
>>> comp.get_atomic_fraction(Element("Li"))
0.14285714285714285
>>> comp.num_atoms
7.0
>>> comp.reduced_formula
'LiFePO4'
>>> comp.formula
'Li1 Fe1 P1 O4'
>>> comp.get_wt_fraction(Element("Li"))
0.04399794666951898
>>> comp.num_atoms
7.0
"""
"""
Tolerance in distinguishing different composition amounts.
1e-8 is fairly tight, but should cut out most floating point arithmetic
errors.
"""
amount_tolerance = 1e-8
"""
Special formula handling for peroxides and certain elements. This is so
that formula output does not write LiO instead of Li2O2 for example.
"""
special_formulas = {"LiO": "Li2O2", "NaO": "Na2O2", "KO": "K2O2",
"HO": "H2O2", "CsO": "Cs2O2", "RbO": "Rb2O2",
"O": "O2", "N": "N2", "F": "F2", "Cl": "Cl2",
"H": "H2"}
def __init__(self, *args, **kwargs): # allow_negative=False
"""
Very flexible Composition construction, similar to the built-in Python
dict(). Also extended to allow simple string init.
Args:
Any form supported by the Python built-in dict() function.
1. A dict of either {Element/Specie: amount},
{string symbol:amount}, or {atomic number:amount} or any mixture
of these. E.g., {Element("Li"):2 ,Element("O"):1},
{"Li":2, "O":1}, {3:2, 8:1} all result in a Li2O composition.
2. Keyword arg initialization, similar to a dict, e.g.,
Composition(Li = 2, O = 1)
In addition, the Composition constructor also allows a single
string as an input formula. E.g., Composition("Li2O").
allow_negative: Whether to allow negative compositions. This
argument must be popped from the \*\*kwargs due to \*args
ambiguity.
"""
self.allow_negative = kwargs.pop('allow_negative', False)
# it's much faster to recognize a composition and use the elmap than
# to pass the composition to dict()
if len(args) == 1 and isinstance(args[0], Composition):
elmap = args[0]
elif len(args) == 1 and isinstance(args[0], six.string_types):
elmap = self._parse_formula(args[0])
else:
elmap = dict(*args, **kwargs)
elamt = {}
self._natoms = 0
for k, v in elmap.items():
if v < -Composition.amount_tolerance and not self.allow_negative:
raise CompositionError("Amounts in Composition cannot be "
"negative!")
if abs(v) >= Composition.amount_tolerance:
elamt[get_el_sp(k)] = v
self._natoms += abs(v)
self._data = elamt
def __getitem__(self, item):
try:
sp = get_el_sp(item)
return self._data.get(sp, 0)
except ValueError as ex:
raise TypeError("Invalid key {}, {} for Composition\n"
"ValueError exception:\n{}".format(item,
type(item), ex))
def __len__(self):
return len(self._data)
def __iter__(self):
return self._data.keys().__iter__()
def __contains__(self, item):
try:
sp = get_el_sp(item)
return sp in self._data
except ValueError as ex:
raise TypeError("Invalid key {}, {} for Composition\n"
"ValueError exception:\n{}".format(item,
type(item), ex))
def __eq__(self, other):
# elements with amounts < Composition.amount_tolerance don't show up
# in the elmap, so checking len enables us to only check one
# composition's elements
if len(self) != len(other):
return False
for el, v in self.items():
if abs(v - other[el]) > Composition.amount_tolerance:
return False
return True
def __ge__(self, other):
"""
Defines >= for Compositions. Should ONLY be used for defining a sort
order (the behavior is probably not what you'd expect)
"""
for el in sorted(set(self.elements + other.elements)):
if other[el] - self[el] >= Composition.amount_tolerance:
return False
elif self[el] - other[el] >= Composition.amount_tolerance:
return True
return True
def __ne__(self, other):
return not self.__eq__(other)
def __add__(self, other):
"""
Adds two compositions. For example, an Fe2O3 composition + an FeO
composition gives a Fe3O4 composition.
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] += v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __sub__(self, other):
"""
Subtracts two compositions. For example, an Fe2O3 composition - an FeO
composition gives an FeO2 composition.
Raises:
CompositionError if the subtracted composition is greater than the
original composition in any of its elements, unless allow_negative
is True
"""
new_el_map = collections.defaultdict(float)
new_el_map.update(self)
for k, v in other.items():
new_el_map[get_el_sp(k)] -= v
return Composition(new_el_map, allow_negative=self.allow_negative)
def __mul__(self, other):
"""
Multiply a Composition by an integer or a float.
Fe2O3 * 4 -> Fe8O12
"""
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] * other for el in self},
allow_negative=self.allow_negative)
__rmul__ = __mul__
def __truediv__(self, other):
if not isinstance(other, numbers.Number):
return NotImplemented
return Composition({el: self[el] / other for el in self},
allow_negative=self.allow_negative)
__div__ = __truediv__
def __hash__(self):
"""
Minimally effective hash function that just distinguishes between
Compositions with different elements.
"""
hashcode = 0
for el, amt in self.items():
if abs(amt) > Composition.amount_tolerance:
hashcode += el.Z
return hashcode
@property
def average_electroneg(self):
return sum((el.X * abs(amt) for el, amt in self.items())) / \
self.num_atoms
def almost_equals(self, other, rtol=0.1, atol=1e-8):
"""
Returns true if compositions are equal within a tolerance.
Args:
other (Composition): Other composition to check
rtol (float): Relative tolerance
atol (float): Absolute tolerance
"""
sps = set(self.elements + other.elements)
for sp in sps:
a = self[sp]
b = other[sp]
tol = atol + rtol * (abs(a) + abs(b)) / 2
if abs(b - a) > tol:
return False
return True
@property
def is_element(self):
"""
True if composition is for an element.
"""
return len(self) == 1
def copy(self):
return Composition(self, allow_negative=self.allow_negative)
@property
def formula(self):
"""
Returns a formula string, with elements sorted by electronegativity,
e.g., Li4 Fe4 P4 O16.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys(), key=lambda sym: get_el_sp(sym).X)
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def alphabetical_formula(self):
"""
Returns a formula string, with elements sorted alphabetically,
e.g., Fe4 Li4 O16 P4.
"""
sym_amt = self.get_el_amt_dict()
syms = sorted(sym_amt.keys())
formula = [s + formula_double_format(sym_amt[s], False) for s in syms]
return " ".join(formula)
@property
def element_composition(self):
"""
Returns the composition replacing any species by the corresponding
element.
"""
return Composition(self.get_el_amt_dict(),
allow_negative=self.allow_negative)
@property
def fractional_composition(self):
"""
Returns the normalized composition in which the amounts of all species
sum to 1.
Returns:
Normalized composition in which the amounts of all species sum to 1.
"""
return self / self._natoms
@property
def reduced_composition(self):
"""
Returns the reduced composition, i.e., amounts normalized by the
greatest common divisor. e.g., Composition("FePO4") for
Composition("Fe4P4O16").
"""
return self.get_reduced_composition_and_factor()[0]
def get_reduced_composition_and_factor(self):
"""
Calculates a reduced composition and factor.
Returns:
A normalized composition and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (Composition("LiFePO4"), 4).
"""
factor = self.get_reduced_formula_and_factor()[1]
return self / factor, factor
def get_reduced_formula_and_factor(self):
"""
Calculates a reduced formula and factor.
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li4Fe4P4O16 returns (LiFePO4, 4).
"""
all_int = all(abs(x - round(x)) < Composition.amount_tolerance
for x in self.values())
if not all_int:
return self.formula.replace(" ", ""), 1
d = {k: int(round(v)) for k, v in self.get_el_amt_dict().items()}
(formula, factor) = reduce_formula(d)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor
def get_integer_formula_and_factor(self, max_denominator=10000):
"""
Calculates an integer formula and factor.
Args:
max_denominator (int): all amounts in the el:amt dict are
first converted to a Fraction with this maximum denominator
Returns:
A pretty normalized formula and a multiplicative factor, i.e.,
Li0.5O0.25 returns (Li2O, 0.25). O0.25 returns (O2, 0.125)
"""
el_amt = self.get_el_amt_dict()
g = gcd_float(list(el_amt.values()), 1 / max_denominator)
d = {k: round(v / g) for k, v in el_amt.items()}
(formula, factor) = reduce_formula(d)
if formula in Composition.special_formulas:
formula = Composition.special_formulas[formula]
factor /= 2
return formula, factor * g
@property
def reduced_formula(self):
"""
Returns a pretty normalized formula, i.e., LiFePO4 instead of
Li4Fe4P4O16.
"""
return self.get_reduced_formula_and_factor()[0]
@property
def elements(self):
"""
Returns view of elements in Composition.
"""
return list(self.keys())
def __str__(self):
return " ".join([
"{}{}".format(k, formula_double_format(v, ignore_ones=False))
for k, v in self.as_dict().items()])
@property
def num_atoms(self):
"""
Total number of atoms in Composition. For negative amounts, sum
of absolute values
"""
return self._natoms
@property
@unitized("amu")
def weight(self):
"""
Total molecular weight of Composition
"""
return sum([amount * el.atomic_mass
for el, amount in self.items()])
def get_atomic_fraction(self, el):
"""
Calculate atomic fraction of an Element or Specie.
Args:
el (Element/Specie): Element or Specie to get fraction for.
Returns:
Atomic fraction for element el in Composition
"""
return abs(self[el]) / self._natoms
def get_wt_fraction(self, el):
"""
Calculate weight fraction of an Element or Specie.
Args:
el (Element/Specie): Element or Specie to get fraction for.
Returns:
Weight fraction for element el in Composition
"""
return get_el_sp(el).atomic_mass * abs(self[el]) / self.weight
def _parse_formula(self, formula):
"""
Args:
formula (str): A string formula, e.g. Fe2O3, Li3Fe2(PO4)3
Returns:
Composition with that formula.
"""
def get_sym_dict(f, factor):
sym_dict = collections.defaultdict(float)
for m in re.finditer(r"([A-Z][a-z]*)\s*([-*\.\d]*)", f):
el = m.group(1)
amt = 1
if m.group(2).strip() != "":
amt = float(m.group(2))
sym_dict[el] += amt * factor
f = f.replace(m.group(), "", 1)
if f.strip():
raise CompositionError("{} is an invalid formula!".format(f))
return sym_dict
m = re.search(r"\(([^\(\)]+)\)\s*([\.\d]*)", formula)
if m:
factor = 1
if m.group(2) != "":
factor = float(m.group(2))
unit_sym_dict = get_sym_dict(m.group(1), factor)
expanded_sym = "".join(["{}{}".format(el, amt)
for el, amt in unit_sym_dict.items()])
expanded_formula = formula.replace(m.group(), expanded_sym)
return self._parse_formula(expanded_formula)
return get_sym_dict(formula, 1)
@property
def anonymized_formula(self):
"""
An anonymized formula. Unique species are arranged in order of
increasing amount and assigned ascending letters. Useful for
prototyping formulas. For example, all stoichiometric perovskites have
anonymized_formula ABC3.
"""
reduced = self.element_composition
if all(x == int(x) for x in self.values()):
reduced /= gcd(*(int(i) for i in self.values()))
anon = ""
for e, amt in zip(string.ascii_uppercase, sorted(reduced.values())):
if amt == 1:
amt_str = ""
elif abs(amt % 1) < 1e-8:
amt_str = str(int(amt))
else:
amt_str = str(amt)
anon += ("{}{}".format(e, amt_str))
return anon
def __repr__(self):
return "Comp: " + self.formula
@classmethod
def from_dict(cls, d):
"""
Creates a composition from a dict generated by as_dict(). Strictly not
necessary given that the standard constructor already takes in such an
input, but this method preserves the standard pymatgen API of having
from_dict methods to reconstitute objects generated by as_dict(). Allows
for easier introspection.
Args:
d (dict): {symbol: amount} dict.
"""
return cls(d)
def get_el_amt_dict(self):
"""
Returns:
Dict with element symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d = collections.defaultdict(float)
for e, a in self.items():
d[e.symbol] += a
return d
def as_dict(self):
"""
Returns:
dict with species symbol and (unreduced) amount e.g.,
{"Fe": 4.0, "O":6.0} or {"Fe3+": 4.0, "O2-":6.0}
"""
d = collections.defaultdict(float)
for e, a in self.items():
d[str(e)] += a
return d
@property
def to_reduced_dict(self):
"""
Returns:
Dict with element symbol and reduced amount e.g.,
{"Fe": 2.0, "O":3.0}
"""
c = Composition(self.reduced_formula)
return c.as_dict()
@property
def to_data_dict(self):
"""
Returns:
A dict with many keys and values relating to Composition/Formula,
including reduced_cell_composition, unit_cell_composition,
reduced_cell_formula, elements and nelements.
"""
return {"reduced_cell_composition": self.to_reduced_dict,
"unit_cell_composition": self.as_dict(),
"reduced_cell_formula": self.reduced_formula,
"elements": self.as_dict().keys(),
"nelements": len(self.as_dict().keys())}
@staticmethod
def ranked_compositions_from_indeterminate_formula(fuzzy_formula,
lock_if_strict=True):
"""
Takes in a formula where capitalization might not be correctly entered,
and suggests a ranked list of potential Composition matches.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations
lock_if_strict (bool): If true, a properly entered formula will
only return the one correct interpretation. For example,
"Co1" will only return "Co1" if true, but will return both
"Co1" and "C1 O1" if false.
Returns:
A ranked list of potential Composition matches
"""
#if we have an exact match and the user specifies lock_if_strict, just
#return the exact match!
if lock_if_strict:
#the strict composition parsing might throw an error, we can ignore
#it and just get on with fuzzy matching
try:
comp = Composition(fuzzy_formula)
return [comp]
except (CompositionError, ValueError):
pass
all_matches = Composition._comps_from_fuzzy_formula(fuzzy_formula)
#remove duplicates
all_matches = list(set(all_matches))
#sort matches by rank descending
all_matches = sorted(all_matches,
key=lambda match: match[1], reverse=True)
all_matches = [m[0] for m in all_matches]
return all_matches
@staticmethod
def _comps_from_fuzzy_formula(fuzzy_formula, m_dict={}, m_points=0,
factor=1):
"""
A recursive helper method for formula parsing that helps in
interpreting and ranking indeterminate formulas.
Author: Anubhav Jain
Args:
fuzzy_formula (str): A formula string, such as "co2o3" or "MN",
that may or may not have multiple interpretations.
m_dict (dict): A symbol:amt dictionary from the previously parsed
formula.
m_points: Number of points gained from the previously parsed
formula.
factor: Coefficient for this parse, e.g. (PO4)2 will feed in PO4
as the fuzzy_formula with a coefficient of 2.
Returns:
A list of tuples, with the first element being a Composition and
the second element being the number of points awarded to that
Composition interpretation.
"""
def _parse_chomp_and_rank(m, f, m_dict, m_points):
"""
A helper method for formula parsing that helps in interpreting and
ranking indeterminate formulas
Author: Anubhav Jain
Args:
m: A regex match, with the first group being the element and
the second group being the amount
f: The formula part containing the match
m_dict: A symbol:amt dictionary from the previously parsed
formula
m_points: Number of points gained from the previously parsed
formula
Returns:
A tuple of (f, m_dict, points) where m_dict now contains data
from the match and the match has been removed (chomped) from
the formula f. The "goodness" of the match determines the
number of points returned for chomping. Returns
(None, None, None) if no element could be found...
"""
points = 0
# Points awarded if the first letter of the element is correctly
# specified as a capital
points_first_capital = 100
# Points awarded if the second letter of the element is correctly
# specified as lowercase
points_second_lowercase = 100
#get element and amount from regex match
el = m.group(1)
if len(el) > 2 or len(el) < 1:
raise CompositionError("Invalid element symbol entered!")
amt = float(m.group(2)) if m.group(2).strip() != "" else 1
#convert the element string to proper [uppercase,lowercase] format
#and award points if it is already in that format
char1 = el[0]
char2 = el[1] if len(el) > 1 else ""
if char1 == char1.upper():
points += points_first_capital
if char2 and char2 == char2.lower():
points += points_second_lowercase
el = char1.upper() + char2.lower()
#if it's a valid element, chomp and add to the points
if Element.is_valid_symbol(el):
if el in m_dict:
m_dict[el] += amt * factor
else:
m_dict[el] = amt * factor
return f.replace(m.group(), "", 1), m_dict, m_points + points
#else return None
return None, None, None
fuzzy_formula = fuzzy_formula.strip()
if len(fuzzy_formula) == 0:
# The entire formula has been parsed into m_dict. Return the
# corresponding Composition and number of points
if m_dict:
yield (Composition.from_dict(m_dict), m_points)
else:
# if there is a parenthesis, remove it and match the remaining stuff
# with the appropriate factor
for mp in re.finditer(r"\(([^\(\)]+)\)([\.\d]*)", fuzzy_formula):
mp_points = m_points
mp_form = fuzzy_formula.replace(mp.group(), " ", 1)
mp_dict = dict(m_dict)
mp_factor = 1 if mp.group(2) == "" else float(mp.group(2))
# Match the stuff inside the parenthesis with the appropriate
# factor
for match in \
Composition._comps_from_fuzzy_formula(mp.group(1),
mp_dict,
mp_points,
factor=mp_factor):
only_me = True
# Match the stuff outside the parentheses and return the
# sum.
for match2 in \
Composition._comps_from_fuzzy_formula(mp_form,
mp_dict,
mp_points,
factor=1):
only_me = False
yield (match[0] + match2[0], match[1] + match2[1])
# if the stuff inside the parenthesis is nothing, then just
# return the stuff inside the parentheses
if only_me:
yield match
return
# try to match the single-letter elements
m1 = re.match(r"([A-z])([\.\d]*)", fuzzy_formula)
if m1:
m_points1 = m_points
m_form1 = fuzzy_formula
m_dict1 = dict(m_dict)
(m_form1, m_dict1, m_points1) = \
_parse_chomp_and_rank(m1, m_form1, m_dict1, m_points1)
if m_dict1:
#there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form1,
m_dict1,
m_points1,
factor):
yield match
#try to match two-letter elements
m2 = re.match(r"([A-z]{2})([\.\d]*)", fuzzy_formula)
if m2:
m_points2 = m_points
m_form2 = fuzzy_formula
m_dict2 = dict(m_dict)
(m_form2, m_dict2, m_points2) = \
_parse_chomp_and_rank(m2, m_form2, m_dict2, m_points2)
if m_dict2:
#there was a real match
for match in \
Composition._comps_from_fuzzy_formula(m_form2, m_dict2,
m_points2,
factor):
yield match
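# Illustrative sketch only (not part of pymatgen): the indeterminate-formula
# ranking defined above in action. "co2o3" can be read as Co2O3 or as C1 O2 O3;
# the two-letter reading earns the lowercase-second-letter points, so Co2O3
# comes back ranked first.
def _example_fuzzy_formula_ranking():
    return Composition.ranked_compositions_from_indeterminate_formula("co2o3")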
def reduce_formula(sym_amt):
"""
Helper method to reduce a sym_amt dict to a reduced formula and factor.
Args:
sym_amt (dict): {symbol: amount}.
Returns:
(reduced_formula, factor).
"""
syms = sorted(sym_amt.keys(),
key=lambda s: get_el_sp(s).X)
syms = list(filter(lambda s: abs(sym_amt[s]) >
Composition.amount_tolerance, syms))
num_el = len(syms)
contains_polyanion = (num_el >= 3 and
get_el_sp(syms[num_el - 1]).X
- get_el_sp(syms[num_el - 2]).X < 1.65)
factor = 1
# Enforce integers for doing gcd.
if all((int(i) == i for i in sym_amt.values())):
factor = abs(gcd(*(int(i) for i in sym_amt.values())))
reduced_form = []
n = num_el - 2 if contains_polyanion else num_el
for i in range(0, n):
s = syms[i]
normamt = sym_amt[s] * 1.0 / factor
reduced_form.append(s)
reduced_form.append(formula_double_format(normamt))
if contains_polyanion:
poly_sym_amt = {syms[i]: sym_amt[syms[i]] / factor
for i in range(n, num_el)}
(poly_form, poly_factor) = reduce_formula(poly_sym_amt)
if poly_factor != 1:
reduced_form.append("({}){}".format(poly_form, int(poly_factor)))
else:
reduced_form.append(poly_form)
reduced_form = "".join(reduced_form)
return reduced_form, factor
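# Illustrative worked example of the helper above (not part of pymatgen):
# {"Fe": 4, "O": 6} sorts to (Fe, O) by electronegativity, contains no
# polyanion grouping (fewer than three elements) and has a gcd of 2, so it
# reduces to "Fe2O3" with a factor of 2.
def _reduce_formula_example():
    return reduce_formula({"Fe": 4, "O": 6})  # -> ("Fe2O3", 2)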
class CompositionError(Exception):
"""Exception class for composition errors"""
pass
class ChemicalPotential(dict, MSONable):
"""
Class to represent set of chemical potentials. Can be:
multiplied/divided by a Number
multiplied by a Composition (returns an energy)
added/subtracted with other ChemicalPotentials.
"""
def __init__(self, *args, **kwargs):
"""
Args:
*args, **kwargs: any valid dict init arguments
"""
d = dict(*args, **kwargs)
super(ChemicalPotential, self).__init__((get_el_sp(k), v)
for k, v in d.items())
if len(d) != len(self):
raise ValueError("Duplicate potential specified")
def __mul__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v * other for k, v in self.items()})
else:
return NotImplemented
__rmul__ = __mul__
def __truediv__(self, other):
if isinstance(other, numbers.Number):
return ChemicalPotential({k: v / other for k, v in self.items()})
else:
return NotImplemented
__div__ = __truediv__
def __sub__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) - other.get(e, 0)
for e in els})
else:
return NotImplemented
def __add__(self, other):
if isinstance(other, ChemicalPotential):
els = set(self.keys()).union(other.keys())
return ChemicalPotential({e: self.get(e, 0) + other.get(e, 0)
for e in els})
else:
return NotImplemented
def get_energy(self, composition, strict=True):
"""
Calculates the energy of a composition.
Args:
composition (Composition): input composition
strict (bool): Whether all potentials must be specified
"""
if strict and set(composition.keys()) > set(self.keys()):
s = set(composition.keys()) - set(self.keys())
raise ValueError("Potentials not specified for {}".format(s))
return sum(self.get(k, 0) * v for k, v in composition.items())
def __repr__(self):
return "ChemPots: " + super(ChemicalPotential, self).__repr__()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
# -*- coding: utf-8 -*-
def test_datapackagejson_parse(testdir, datapackage):
"""Test loading of datapackage json file"""
testdir.makepyfile("""
def test_metadata_fixture(metadata):
assert metadata['title'] == 'Children by Family Type'
assert metadata['name'] == 'children-by-family-type'
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_metadata_fixture PASSED',
])
def test_geography_extraction(testdir, datapackage):
"""Test geography extraction fixture"""
testdir.makepyfile("""
def test_metadata_geography(geography):
assert geography == 'Town'
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_metadata_geography PASSED',
])
assert result.ret == 0
def test_years_extract(testdir, datapackage):
"""Test years extraction fixture"""
testdir.makepyfile("""
def test_metadata_years(years):
assert years == ["2016", "2015", "2014", "2013"]
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_metadata_years PASSED',
])
assert result.ret == 0
def test_dimension_group_list_setup(testdir, datapackage):
"""Test extraction of dimension groups as prerequisite for permutation test"""
testdir.makepyfile("""
def test_dimension_group_list(dimension_groups):
assert set(dimension_groups[0].keys()) == {"English Language Learner", "Grade"}
assert set(dimension_groups[1].keys()) == {"Students with Disabilities"}
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_dimension_group_list PASSED',
])
assert result.ret == 0
def test_dimension_permutations_dataset_one(testdir, datapackage):
"""Confirm that pytest can correctly use fixture to load yaml"""
testdir.makepyfile("""
def test_dimension_permutations(dimension_combinations):
assert len(dimension_combinations) == 10
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_dimension_permutations PASSED',
])
assert result.ret == 0
def test_dimension_permutations_dataset_two(testdir, housing_datapackage):
"""Confirm that pytest can correctly use fixture to load yaml"""
testdir.makepyfile("""
def test_dimension_permutations(dimension_combinations):
assert len(dimension_combinations) == 6
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_dimension_permutations PASSED',
])
assert result.ret == 0
def test_spotcheck_fixture(testdir, datapackage):
"""Test extraction of spotchecks from datapackage"""
testdir.makepyfile("""
def test_spotcheck_fixture(spotchecks):
assert len(spotchecks) == 3
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_spotcheck_fixture PASSED',
])
assert result.ret == 0
def test_datafile_load(testdir, datapackage, datafile):
testdir.makepyfile("""
def test_datafile_load(dataset):
assert len(dataset) == 3
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_datafile_load PASSED',
])
assert result.ret == 0
def test_housing_spotcheck_lookups(testdir, housing_datapackage, housing_datafile):
testdir.makepyfile("""
import pytest
def test_spotcheck_testing(spotcheck_results):
for check in spotcheck_results:
assert check.expected == check.actual
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_spotcheck_testing PASSED',
])
assert result.ret == 0
def test_spotcheck_lookups(testdir, datapackage, datafile):
testdir.makepyfile("""
import pytest
def test_spotcheck_testing(spotcheck_results):
for check in spotcheck_results:
assert check.expected == check.actual
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_spotcheck_testing PASSED',
])
assert result.ret == 0
def test_geoes_are_valid_towns(testdir, housing_datapackage, housing_datafile):
testdir.makepyfile("""
import pytest
import datapackage
def helper_filter(item, conditions):
for k,v in conditions:
if item[k] != v:
return False
return True
@pytest.fixture
def towns():
dp = datapackage.DataPackage(
'https://raw.githubusercontent.com/CT-Data-Collaborative/ct-town-list/master/datapackage.json')
return dp.resources[0].data
def test_geoes_are_valid_towns(towns, geographies):
assert set(geographies) == set([x['Town'] for x in towns])
""")
result = testdir.runpytest(
'-v'
)
result.stdout.fnmatch_lines([
'*::test_geoes_are_valid_towns PASSED',
])
assert result.ret == 0
def test_row_counts(testdir, housing_datapackage, housing_datafile):
testdir.makepyfile("""
def test_dataset_row_counts(rowcount):
assert rowcount.actual == rowcount.expected
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines([
'*::test_dataset_row_counts PASSED',
])
assert result.ret == 0
def test_domain(testdir, housing_datapackage):
testdir.makepyfile("""
def test_domain(domain):
assert domain == 'Housing'
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines([
'*::test_domain PASSED',
])
assert result.ret == 0
def test_subdomain(testdir, housing_datapackage):
testdir.makepyfile("""
def test_subdomain(subdomain):
assert subdomain == 'Housing Characteristics'
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines([
'*::test_subdomain PASSED',
])
assert result.ret == 0
def test_domain_subdomain_validation(testdir, housing_datapackage):
testdir.makepyfile("""
def test_domain_subdomain_validation(domain_map, domain, subdomain):
assert domain in domain_map
assert subdomain in domain_map[domain]
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines(['*::test_domain_subdomain_validation PASSED', ])
assert result.ret == 0
def test_source_validation(testdir, housing_datapackage):
testdir.makepyfile("""
def test_source_validation(source_options, source):
for s in source:
assert s['name'] in source_options
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines(['*::test_source_validation PASSED', ])
assert result.ret == 0
def test_schema_validate(testdir, housing_datapackage):
testdir.makepyfile("""
def test_schema_validation(schema):
dimensions = [s for s in schema if s['dimension']]
for d in dimensions:
assert isinstance(d["constraints"]["enum"], list)
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines(['*::test_schema_validation PASSED', ])
assert result.ret == 0
def test_schema_validate_2(testdir, housing_datapackage):
testdir.makepyfile("""
def test_schema_validation(schema_test):
assert schema_test
""")
result = testdir.runpytest('-v')
result.stdout.fnmatch_lines(['*::test_schema_validation PASSED', ])
assert result.ret == 0
|
|
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for hypothesis testing of psd_kernels."""
import collections
import contextlib
import inspect
import logging
import re
import hypothesis as hp
from hypothesis.extra import numpy as hpnp
import hypothesis.strategies as hps
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.distributions import marginal_fns
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.math import psd_kernels as tfpk
SPECIAL_KERNELS = [
'ChangePoint',
'FeatureScaled',
'KumaraswamyTransformed',
'PointwiseExponential',
'SchurComplement',
'SpectralMixture',
]
NON_INSTANTIABLE_SPECIAL_KERNELS = [
'AutoCompositeTensorPsdKernel',
'ExponentialCurve', # TODO(jburnim, srvasude): Enable this kernel.
'FeatureTransformed',
'PositiveSemidefiniteKernel',
]
class KernelInfo(collections.namedtuple(
'KernelInfo', ['cls', 'params_event_ndims'])):
"""Sufficient information to instantiate a Kernel.
To wit
- The Python class `cls` giving the class, and
- A Python dict `params_event_ndims` giving the event dimensions for the
parameters (so that parameters can be built with predictable batch shapes).
Specifically, the `params_event_ndims` dict maps string parameter names to
Python integers. Each integer gives how many (trailing) dimensions of that
parameter are part of the event.
"""
__slots__ = ()
def _instantiable_base_kernels():
"""Computes the table of mechanically instantiable base Kernels.
A Kernel is mechanically instantiable if
- The class appears as a symbol binding in `tfp.math.psd_kernels`;
- The class defines a `_params_event_ndims` method (necessary
to generate parameter Tensors with predictable batch shapes); and
- The name is not blocklisted in `SPECIAL_KERNELS`.
Compound kernels have their own
instantiation rules hard-coded in the `kernel` strategy.
Returns:
instantiable_base_kernels: A Python dict mapping kernel name
(as a string) to a `KernelInfo` carrying the information necessary to
instantiate it.
"""
result = {}
for kernel_name in dir(tfpk):
kernel_class = getattr(tfpk, kernel_name)
if (not inspect.isclass(kernel_class) or
not issubclass(kernel_class, tfpk.PositiveSemidefiniteKernel) or
kernel_name in SPECIAL_KERNELS or
kernel_name in NON_INSTANTIABLE_SPECIAL_KERNELS):
continue
try:
params_event_ndims = {
k: p.event_ndims
for (k, p) in kernel_class.parameter_properties().items()
if p.is_tensor and p.event_ndims is not None
}
except NotImplementedError:
logging.warning(
'Unable to test tfd.%s: `parameter_properties()` is not '
'implemented or does not define concrete (integer) `event_ndims` '
'for all parameters.',
kernel_name)
# Skip kernels whose parameter properties cannot be introspected.
continue
result[kernel_name] = KernelInfo(kernel_class, params_event_ndims)
return result
# INSTANTIABLE_BASE_KERNELS is a map from str->(KernelClass, params_event_ndims)
INSTANTIABLE_BASE_KERNELS = _instantiable_base_kernels()
del _instantiable_base_kernels
MUTEX_PARAMS = (
set(['length_scale', 'inverse_length_scale']),
set(['scale_diag', 'inverse_scale_diag']),
)
# pylint is unable to handle @hps.composite (e.g. complains "No value for
# argument 'batch_shape' in function call"), so disable this lint for the file.
# pylint: disable=no-value-for-parameter
class ConstrainToUnit(tfpk.FeatureTransformed):
"""Constrain inputs to `[0, 1]`."""
def __init__(self, kernel, validate_args=False):
parameters = dict(locals())
self._kernel = kernel
super(ConstrainToUnit, self).__init__(
kernel,
transformation_fn=lambda x, f, e: tf.math.sigmoid(x),
validate_args=validate_args,
parameters=parameters)
@property
def kernel(self):
return self._kernel
def _batch_shape(self):
return self.kernel.batch_shape
def _batch_shape_tensor(self):
return self.kernel.batch_shape_tensor()
def __getitem__(self, slices):
overrides = {}
if self.parameters.get('kernel', None) is not None:
overrides['kernel'] = self.kernel[slices]
return self.copy(**overrides)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(kernel=parameter_properties.BatchedComponentProperties())
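# Illustrative sketch (not part of this test library): ConstrainToUnit simply
# squashes inputs through a sigmoid before handing them to the wrapped kernel,
# so any base kernel can be fed unconstrained reals. The base kernel and input
# values below are arbitrary choices.
def _example_constrain_to_unit():
  base = tfpk.ExponentiatedQuadratic()
  k = ConstrainToUnit(base)
  x = tf.constant([[-3.], [0.], [5.]])
  # Same as evaluating `base` on tf.math.sigmoid(x), whose entries lie in (0, 1).
  return k.matrix(x, x)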
@hps.composite
def kernel_input(
draw,
batch_shape,
example_dim=None,
example_ndims=None,
feature_dim=None,
feature_ndims=None,
enable_vars=False,
name=None):
"""Strategy for drawing arbitrary Kernel input.
In order to avoid duplicates (or even numerically near-duplicates), we
generate inputs on a grid. We let hypothesis generate the number of grid
points and distance between grid points, within some reasonable pre-defined
ranges. The result will be a batch of example sets, within which each set of
examples has no duplicates (but no such duplication avoidance is applied
across batches).
Args:
draw: Hypothesis function supplied by `@hps.composite`.
batch_shape: `TensorShape`. The batch shape of the resulting
kernel input.
example_dim: Optional Python int giving the size of each example dimension.
If omitted, Hypothesis will choose one.
example_ndims: Optional Python int giving the number of example dimensions
of the input. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: If `False`, the returned parameters are all Tensors, never
Variables or DeferredTensor.
name: Name to give the variable.
Returns:
kernel_input: A strategy for drawing kernel_input with the prescribed shape
(or an arbitrary one if omitted).
"""
if example_ndims is None:
example_ndims = draw(hps.integers(min_value=1, max_value=2))
if example_dim is None:
example_dim = draw(hps.integers(min_value=2, max_value=4))
if feature_ndims is None:
feature_ndims = draw(hps.integers(min_value=1, max_value=2))
if feature_dim is None:
feature_dim = draw(hps.integers(min_value=2, max_value=4))
batch_shape = tensorshape_util.as_list(batch_shape)
example_shape = [example_dim] * example_ndims
feature_shape = [feature_dim] * feature_ndims
batch_size = int(np.prod(batch_shape))
example_size = example_dim ** example_ndims
feature_size = feature_dim ** feature_ndims
# We would like each batch of examples to be unique, to avoid computing kernel
# matrices that are semi-definite. hypothesis.extra.numpy.arrays doesn't have
# a sense of tolerance, so we need to do some extra work to get points
# sufficiently far from each other.
grid_size = draw(hps.integers(min_value=10, max_value=100))
grid_spacing = draw(hps.floats(min_value=1e-2, max_value=2))
hp.note('Grid size {} and spacing {}'.format(grid_size, grid_spacing))
def _grid_indices_to_values(grid_indices):
return (grid_spacing *
(np.array(grid_indices, dtype=np.float64) - np.float64(grid_size)))
# We'll construct the result by stacking onto flattened batch, example and
# feature dims, then reshape to unflatten at the end.
result = np.zeros([0, example_size, feature_size])
for _ in range(batch_size):
seen = set()
index_array_strategy = hps.tuples(
*([hps.integers(0, grid_size + 1)] * feature_size)).filter(
lambda x, seen=seen: x not in seen) # Default param to sate pylint.
examples = np.zeros([1, 0, feature_size])
for _ in range(example_size):
feature_grid_locations = draw(index_array_strategy)
seen.add(feature_grid_locations)
example = _grid_indices_to_values(feature_grid_locations)
example = example[np.newaxis, np.newaxis, ...]
examples = np.concatenate([examples, example], axis=1)
result = np.concatenate([result, examples], axis=0)
result = np.reshape(result, batch_shape + example_shape + feature_shape)
if enable_vars and draw(hps.booleans()):
result = tf.Variable(result, name=name)
if draw(hps.booleans()):
result = tfp_hps.defer_and_count_usage(result)
return result
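# Illustrative sketch (not part of this test library): drawing from the
# `kernel_input` strategy above inside a Hypothesis test, given a `data` object
# obtained from the standard `hps.data()` strategy. The shapes and dims are
# arbitrary placeholder choices.
def _example_kernel_input_draw(data):
  x = data.draw(kernel_input(batch_shape=[2], example_dim=3, example_ndims=1,
                             feature_dim=2, feature_ndims=1))
  # x has shape [2, 3, 2]: a batch of 2 example sets, each holding 3 examples
  # with a single feature axis of size 2.
  return x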
@contextlib.contextmanager
def no_pd_errors():
"""Catch and ignore examples where a Cholesky decomposition fails.
This will typically occur when the matrix is not positive definite.
Yields:
None
"""
# TODO(b/174591555): Instead of catching and `assume`ing away positive
# definite errors, avoid them in the first place.
try:
yield
except tf.errors.OpError as e:
# NOTE: When tf.linalg.cholesky fails, it returns a matrix with nans on and
# below the diagonal. When we use the Cholesky decomposition in a solve,
# TF will raise an error that the matrix of nans is not invertible.
if re.search(r'Input matrix is not invertible', str(e)):
hp.assume(False)
else:
raise
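def _example_no_pd_errors_usage(kernel, xs):
  # Illustrative sketch only (not part of the original test suite; `kernel` and
  # `xs` are hypothetical arguments): a property test would typically wrap its
  # Cholesky-based computation in `no_pd_errors` so that spurious non-PD
  # examples are discarded via `hp.assume` instead of reported as failures.
  with no_pd_errors():
    return tf.linalg.cholesky(kernel.matrix(xs, xs))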
@hps.composite
def broadcasting_params(draw,
kernel_name,
batch_shape,
event_dim=None,
enable_vars=False):
"""Draws a dict of parameters which should yield the given batch shape."""
if kernel_name not in INSTANTIABLE_BASE_KERNELS:
raise ValueError('Unknown Kernel name {}'.format(kernel_name))
params_event_ndims = INSTANTIABLE_BASE_KERNELS[kernel_name].params_event_ndims
def _constraint(param):
return constraint_for(kernel_name, param)
return draw(
tfp_hps.broadcasting_params(
batch_shape,
params_event_ndims,
event_dim=event_dim,
enable_vars=enable_vars,
constraint_fn_for=_constraint,
mutex_params=MUTEX_PARAMS,
dtype=np.float64))
def depths():
# TODO(b/139841600): Increase the depth after we can generate kernel inputs
# that are not too close to each other.
return hps.integers(min_value=0, max_value=1)
@hps.composite
def changepoints(
draw,
batch_shape=None,
event_dim=None,
feature_dim=None,
feature_ndims=None,
enable_vars=None,
depth=None):
"""Strategy for drawing `Changepoint` kernels.
The underlying kernel is drawn from the `kernels` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Kernel. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
kernel's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
depth: Python `int` giving maximum nesting depth of compound kernel.
Returns:
kernels: A strategy for drawing `Changepoint` kernels with the specified
`batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_dim is None:
feature_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_ndims is None:
feature_ndims = draw(hps.integers(min_value=2, max_value=6))
num_kernels = draw(hps.integers(min_value=2, max_value=4))
inner_kernels = []
kernel_variable_names = []
for _ in range(num_kernels):
base_kernel, variable_names = draw(kernels(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=False,
depth=depth-1))
inner_kernels.append(base_kernel)
kernel_variable_names += variable_names
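  # The change locations in `locs` must be ordered along the last axis, so they
  # are built below with a cumulative sum of strictly positive increments;
  # `slopes` only needs to be positive.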
constraints = dict(
locs=lambda x: tf.cumsum(tf.math.abs(x) + 1e-3, axis=-1),
slopes=tfp_hps.softplus_plus_eps())
params = draw(tfp_hps.broadcasting_params(
batch_shape,
event_dim=num_kernels - 1,
params_event_ndims=dict(locs=1, slopes=1),
constraint_fn_for=constraints.get))
params = {k: tf.cast(params[k], tf.float64) for k in params}
if enable_vars and draw(hps.booleans()):
kernel_variable_names.append('locs')
kernel_variable_names.append('slopes')
params['locs'] = tf.Variable(params['locs'], name='locs')
params['slopes'] = tf.Variable(params['slopes'], name='slopes')
result_kernel = tfpk.ChangePoint(
kernels=inner_kernels, validate_args=True, **params)
return result_kernel, kernel_variable_names
@hps.composite
def feature_scaleds(
draw,
batch_shape=None,
event_dim=None,
feature_dim=None,
feature_ndims=None,
enable_vars=None,
depth=None):
"""Strategy for drawing `FeatureScaled` kernels.
The underlying kernel is drawn from the `kernels` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Kernel. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
kernel's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
depth: Python `int` giving maximum nesting depth of compound kernel.
Returns:
kernels: A strategy for drawing `FeatureScaled` kernels with the specified
`batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_dim is None:
feature_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_ndims is None:
feature_ndims = draw(hps.integers(min_value=2, max_value=6))
base_kernel, kernel_variable_names = draw(kernels(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=False,
depth=depth-1))
scale_diag = tfp_hps.softplus_plus_eps()(draw(kernel_input(
batch_shape=batch_shape,
example_ndims=0,
feature_dim=feature_dim,
feature_ndims=feature_ndims)))
hp.note('Forming FeatureScaled kernel with scale_diag: {} '.format(
scale_diag))
if enable_vars and draw(hps.booleans()):
kernel_variable_names.append('scale_diag')
scale_diag = tf.Variable(scale_diag, name='scale_diag')
# Don't enable variable counting. This is because rescaling is
# done for each input, which will exceed two convert_to_tensor calls.
result_kernel = tfpk.FeatureScaled(
kernel=base_kernel,
scale_diag=scale_diag,
validate_args=True)
return result_kernel, kernel_variable_names
@hps.composite
def feature_transformeds(
draw,
batch_shape=None,
event_dim=None,
feature_dim=None,
feature_ndims=None,
enable_vars=None,
depth=None):
"""Strategy for drawing `FeatureTransformed` kernels.
The underlying kernel is drawn from the `kernels` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Kernel. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
kernel's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
depth: Python `int` giving maximum nesting depth of compound kernel.
Returns:
kernels: A strategy for drawing `FeatureTransformed` kernels with the
specified `batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_dim is None:
feature_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_ndims is None:
feature_ndims = draw(hps.integers(min_value=2, max_value=6))
base_kernel, kernel_variable_names = draw(kernels(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars,
depth=depth-1))
hp.note('Forming FeatureTransformed kernel')
result_kernel = tfpk.FeatureTransformed(
kernel=base_kernel,
transformation_fn=lambda x, feature_ndims, example_ndims: x ** 2.,
validate_args=True)
return result_kernel, kernel_variable_names
@hps.composite
def kumaraswamy_transformeds(
draw,
batch_shape=None,
event_dim=None,
feature_dim=None,
feature_ndims=None,
enable_vars=None,
depth=None):
"""Strategy for drawing `KumaraswamyTransformed` kernels.
The underlying kernel is drawn from the `kernels` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Kernel. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
kernel's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
depth: Python `int` giving maximum nesting depth of compound kernel.
Returns:
kernels: A strategy for drawing `KumaraswamyTransformed` kernels with the
specified `batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_dim is None:
feature_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_ndims is None:
feature_ndims = draw(hps.integers(min_value=2, max_value=6))
base_kernel, _ = draw(kernels(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=False,
depth=depth-1))
concentration1 = constrain_to_range(1., 2.)(draw(kernel_input(
batch_shape=batch_shape,
example_ndims=0,
feature_dim=feature_dim,
feature_ndims=feature_ndims)))
concentration0 = constrain_to_range(1., 2.)(draw(kernel_input(
batch_shape=batch_shape,
example_ndims=0,
feature_dim=feature_dim,
feature_ndims=feature_ndims)))
concentrations = {
'concentration1': concentration1,
'concentration0': concentration0
}
kernel_variable_names = []
for param_name in concentrations:
if enable_vars and draw(hps.booleans()):
kernel_variable_names.append(param_name)
concentrations[param_name] = tf.Variable(
concentrations[param_name], name=param_name)
if draw(hps.booleans()):
concentrations[param_name] = tfp_hps.defer_and_count_usage(
concentrations[param_name])
hp.note('Forming KumaraswamyTransformed kernel with '
'concentrations: {}'.format(concentrations))
  # Wrap in ConstrainToUnit so that inputs to the Kumaraswamy CDF lie in the
  # unit interval.
result_kernel = tfpk.KumaraswamyTransformed(
kernel=base_kernel, validate_args=True, **concentrations)
result_kernel = ConstrainToUnit(kernel=result_kernel, validate_args=True)
return result_kernel, kernel_variable_names
@hps.composite
def pointwise_exponentials(
draw,
batch_shape=None,
event_dim=None,
feature_dim=None,
feature_ndims=None,
enable_vars=None,
depth=None):
"""Strategy for drawing `PointwiseExponential` kernels.
The underlying kernel is drawn from the `kernels` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Kernel. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
kernel's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
depth: Python `int` giving maximum nesting depth of compound kernel.
Returns:
    kernels: A strategy for drawing `PointwiseExponential` kernels with the
      specified `batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_dim is None:
feature_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_ndims is None:
feature_ndims = draw(hps.integers(min_value=2, max_value=6))
base_kernel, kernel_variable_names = draw(kernels(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars,
depth=depth-1))
hp.note('Forming PointwiseExponential kernel.')
result_kernel = tfpk.PointwiseExponential(
kernel=base_kernel, validate_args=True)
return result_kernel, kernel_variable_names
@hps.composite
def schur_complements(
draw,
batch_shape=None,
event_dim=None,
feature_dim=None,
feature_ndims=None,
enable_vars=None,
depth=None):
"""Strategy for drawing `SchurComplement` kernels.
The underlying kernel is drawn from the `kernels` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Kernel. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
kernel's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
depth: Python `int` giving maximum nesting depth of compound kernel.
Returns:
kernels: A strategy for drawing `SchurComplement` kernels with the specified
`batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_dim is None:
feature_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_ndims is None:
feature_ndims = draw(hps.integers(min_value=2, max_value=6))
base_kernel, kernel_variable_names = draw(kernels(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=False,
depth=depth-1))
# SchurComplement requires the inputs to have one example dimension.
fixed_inputs = draw(kernel_input(
batch_shape=batch_shape,
example_ndims=1,
feature_dim=feature_dim,
feature_ndims=feature_ndims))
# Positive shift to ensure the divisor matrix is PD.
diag_shift = np.float64(draw(hpnp.arrays(
dtype=np.float64,
shape=tensorshape_util.as_list(batch_shape),
elements=hps.floats(1, 100, allow_nan=False, allow_infinity=False))))
hp.note('Forming SchurComplement kernel with fixed_inputs: {} '
'and diag_shift: {}'.format(fixed_inputs, diag_shift))
schur_complement_params = {
'fixed_inputs': fixed_inputs,
'diag_shift': diag_shift
}
for param_name in schur_complement_params:
if enable_vars and draw(hps.booleans()):
kernel_variable_names.append(param_name)
schur_complement_params[param_name] = tf.Variable(
schur_complement_params[param_name], name=param_name)
if draw(hps.booleans()):
schur_complement_params[param_name] = tfp_hps.defer_and_count_usage(
schur_complement_params[param_name])
result_kernel = tfpk.SchurComplement(
base_kernel=base_kernel,
fixed_inputs=schur_complement_params['fixed_inputs'],
diag_shift=schur_complement_params['diag_shift'],
cholesky_fn=lambda x: marginal_fns.retrying_cholesky(x)[0],
validate_args=True)
return result_kernel, kernel_variable_names
@hps.composite
def spectral_mixtures(
draw,
batch_shape=None,
event_dim=None,
feature_dim=None,
feature_ndims=None,
enable_vars=None,
depth=None):
"""Strategy for drawing `SpectralMixture` kernels.
The underlying kernel is drawn from the `kernels` strategy.
Args:
draw: Hypothesis strategy sampler supplied by `@hps.composite`.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Kernel. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
kernel's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
depth: Python `int` giving maximum nesting depth of compound kernel.
Returns:
    kernels: A strategy for drawing `SpectralMixture` kernels with the
      specified `batch_shape` (or an arbitrary one if omitted).
"""
if depth is None:
depth = draw(depths())
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_dim is None:
feature_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_ndims is None:
feature_ndims = draw(hps.integers(min_value=2, max_value=6))
num_mixtures = draw(hps.integers(min_value=2, max_value=5))
logits = draw(kernel_input(
batch_shape=batch_shape,
example_ndims=0,
feature_dim=num_mixtures,
feature_ndims=1))
locs = draw(kernel_input(
batch_shape=batch_shape,
example_ndims=1,
example_dim=num_mixtures,
feature_dim=feature_dim,
feature_ndims=feature_ndims))
scales = tfp_hps.softplus_plus_eps()(draw(kernel_input(
batch_shape=batch_shape,
example_ndims=1,
example_dim=num_mixtures,
feature_dim=feature_dim,
feature_ndims=feature_ndims)))
hp.note(f'Forming SpectralMixture kernel with logits: {logits} '
f'locs: {locs} and scales: {scales}')
spectral_mixture_params = {'locs': locs, 'logits': logits, 'scales': scales}
kernel_variable_names = []
for param_name in spectral_mixture_params:
if enable_vars and draw(hps.booleans()):
kernel_variable_names.append(param_name)
spectral_mixture_params[param_name] = tf.Variable(
spectral_mixture_params[param_name], name=param_name)
if draw(hps.booleans()):
spectral_mixture_params[param_name] = tfp_hps.defer_and_count_usage(
spectral_mixture_params[param_name])
result_kernel = tfpk.SpectralMixture(
logits=spectral_mixture_params['logits'],
locs=spectral_mixture_params['locs'],
scales=spectral_mixture_params['scales'],
feature_ndims=feature_ndims,
validate_args=True)
return result_kernel, kernel_variable_names
@hps.composite
def base_kernels(
draw,
kernel_name=None,
batch_shape=None,
event_dim=None,
feature_dim=None,
feature_ndims=None,
enable_vars=False):
"""Strategy for drawing kernels that don't depend on other kernels.
Args:
draw: Hypothesis function supplied by `@hps.composite`.
kernel_name: Optional Python `str`. If given, the produced kernels
will all have this type.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Kernel. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
kernel's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
Returns:
kernels: A strategy for drawing Kernels with the specified `batch_shape`
(or an arbitrary one if omitted).
kernel_variable_names: List of kernel parameters that are variables.
"""
if kernel_name is None:
kernel_name = draw(hps.sampled_from(sorted(INSTANTIABLE_BASE_KERNELS)))
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
if event_dim is None:
event_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_dim is None:
feature_dim = draw(hps.integers(min_value=2, max_value=6))
if feature_ndims is None:
feature_ndims = draw(hps.integers(min_value=2, max_value=6))
kernel_params = draw(
broadcasting_params(kernel_name, batch_shape, event_dim=event_dim,
enable_vars=enable_vars))
kernel_variable_names = [
k for k in kernel_params if tensor_util.is_ref(kernel_params[k])]
hp.note('Forming kernel {} with feature_ndims {} and constrained parameters '
'{}'.format(kernel_name, feature_ndims, kernel_params))
ctor = getattr(tfpk, kernel_name)
result_kernel = ctor(
validate_args=True,
feature_ndims=feature_ndims,
**kernel_params)
if batch_shape != result_kernel.batch_shape:
msg = ('Kernel strategy generated a bad batch shape '
'for {}, should have been {}.').format(result_kernel, batch_shape)
raise AssertionError(msg)
return result_kernel, kernel_variable_names
@hps.composite
def kernels(
draw,
kernel_name=None,
batch_shape=None,
event_dim=None,
feature_dim=None,
feature_ndims=None,
enable_vars=False,
depth=None):
"""Strategy for drawing arbitrary Kernels.
Args:
draw: Hypothesis function supplied by `@hps.composite`.
kernel_name: Optional Python `str`. If given, the produced kernels
will all have this type.
batch_shape: An optional `TensorShape`. The batch shape of the resulting
Kernel. Hypothesis will pick a batch shape if omitted.
event_dim: Optional Python int giving the size of each of the
kernel's parameters' event dimensions. This is shared across all
parameters, permitting square event matrices, compatible location and
scale Tensors, etc. If omitted, Hypothesis will choose one.
feature_dim: Optional Python int giving the size of each feature dimension.
If omitted, Hypothesis will choose one.
feature_ndims: Optional Python int stating the number of feature dimensions
inputs will have. If omitted, Hypothesis will choose one.
enable_vars: TODO(bjp): Make this `True` all the time and put variable
initialization in slicing_test. If `False`, the returned parameters are
all Tensors, never Variables or DeferredTensor.
depth: Python `int` giving maximum nesting depth of compound kernel.
Returns:
kernels: A strategy for drawing Kernels with the specified `batch_shape`
(or an arbitrary one if omitted).
kernel_variable_names: List of kernel parameters that are variables.
"""
if depth is None:
depth = draw(depths())
if kernel_name is None and depth > 0:
bases = hps.just(None)
compounds = hps.sampled_from(SPECIAL_KERNELS)
kernel_name = draw(hps.one_of([bases, compounds]))
if kernel_name is None or kernel_name in INSTANTIABLE_BASE_KERNELS:
return draw(
base_kernels(
kernel_name,
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars))
if kernel_name == 'ChangePoint':
return draw(changepoints(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars,
depth=depth))
elif kernel_name == 'FeatureScaled':
return draw(feature_scaleds(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars,
depth=depth))
elif kernel_name == 'FeatureTransformed':
return draw(feature_transformeds(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars,
depth=depth))
elif kernel_name == 'KumaraswamyTransformed':
return draw(kumaraswamy_transformeds(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars,
depth=depth))
elif kernel_name == 'PointwiseExponential':
return draw(pointwise_exponentials(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars,
depth=depth))
elif kernel_name == 'SchurComplement':
return draw(schur_complements(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars,
depth=depth))
elif kernel_name == 'SpectralMixture':
return draw(spectral_mixtures(
batch_shape=batch_shape,
event_dim=event_dim,
feature_dim=feature_dim,
feature_ndims=feature_ndims,
enable_vars=enable_vars,
depth=depth))
raise ValueError('Kernel name {} not found.'.format(kernel_name))
# This will be used for most positive parameters to ensure matrices
# are well-conditioned.
def constrain_to_range(low, high):
return lambda x: (high - low) * tf.math.sigmoid(x) + low
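def _example_constrain_to_range():
  # Illustrative sketch only (not part of the original test suite): shows that
  # `constrain_to_range(low, high)` squashes arbitrary reals into the open
  # interval (low, high) via a scaled sigmoid, keeping drawn parameters in a
  # well-conditioned range.
  squash = constrain_to_range(1., 2.)
  # Approximately [1.0000454, 1.5, 1.9999546] -- always strictly inside (1, 2).
  return squash(tf.constant([-10., 0., 10.], dtype=tf.float64))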
CONSTRAINTS = {
# Keep parameters large enough but not too large so matrices are
# well-conditioned. The ranges below were chosen to ensure kernel
# matrices are positive definite.
'amplitude': constrain_to_range(1., 2.),
'bias_variance': constrain_to_range(0.1, 0.5),
'constant': constrain_to_range(0.1, 0.5),
'concentration0': constrain_to_range(1., 2.),
'concentration1': constrain_to_range(1., 2.),
'df': constrain_to_range(2., 5.),
'scales': constrain_to_range(1., 2.),
'slope_variance': constrain_to_range(0.1, 0.5),
'exponent': lambda x: tf.math.floor(constrain_to_range(1, 4.)(x)),
'length_scale': constrain_to_range(1., 6.),
'inverse_length_scale': constrain_to_range(0., 2.),
'period': constrain_to_range(1., 6.),
'scale_mixture_rate': constrain_to_range(1., 6.),
# Ensure shift isn't too large such that all inputs are mapped
# to the same place.
'shift': lambda x: 5. * tf.math.tanh(x)
}
def constraint_for(kernel_name=None, param=None):
if param is not None:
return CONSTRAINTS.get('{}.{}'.format(kernel_name, param),
CONSTRAINTS.get(param, tfp_hps.identity_fn))
return CONSTRAINTS.get(kernel_name, tfp_hps.identity_fn)
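def _example_constraint_for_lookup():
  # Illustrative sketch only (not part of the original test suite; the kernel
  # name is just an example): `constraint_for` first looks for a
  # kernel-specific entry ('KernelName.param'), then a per-parameter entry in
  # CONSTRAINTS, and finally falls back to the identity function.
  amplitude_constraint = constraint_for('ExponentiatedQuadratic', 'amplitude')
  unconstrained = constraint_for('ExponentiatedQuadratic', 'no_such_param')
  return amplitude_constraint, unconstrained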
|
|
import unittest
from kraken.core.maths.vec3 import Vec3
class TestVec3(unittest.TestCase):
def testString(self):
vec = Vec3()
self.assertEquals(str(vec),
"Vec3(0.0, 0.0, 0.0)")
def testGetPropertyValues(self):
vec = Vec3(0.0, 1.0, 2.0)
self.assertEquals(vec.x, 0.0)
self.assertEquals(vec.y, 1.0)
self.assertEquals(vec.z, 2.0)
def testSetPropertyValues(self):
vec = Vec3()
vec.x = 2
vec.y = 3
vec.z = 4
self.assertEquals(vec.x, 2.0)
self.assertEquals(vec.y, 3.0)
self.assertEquals(vec.z, 4.0)
def testEquals(self):
vec1 = Vec3(0.0, 1.0, 0.0)
vec2 = Vec3(0.0, 1.0, 0.0)
self.assertEqual(vec1, vec2)
def testNotEquals(self):
vec1 = Vec3(0.0, 1.0, 0.0)
vec2 = Vec3(2.0, 1.0, 0.0)
self.assertNotEqual(vec1, vec2)
def testAdd(self):
vec1 = Vec3(0.0, 1.0, 0.0)
vec2 = Vec3(0.0, 1.0, 0.0)
vec3 = vec1 + vec2
result = Vec3(0.0, 2.0, 0.0)
self.assertEqual(vec3, result)
def testSubtract(self):
vec1 = Vec3(3.0, 1.0, 0.0)
vec2 = Vec3(1.0, 0.25, 0.0)
vec3 = vec1 - vec2
result = Vec3(2.0, 0.75, 0.0)
self.assertEqual(vec3, result)
def testMultiply(self):
vec1 = Vec3(3.0, 1.0, 0.0)
vec2 = Vec3(1.0, 0.25, 0.0)
vec3 = vec1 * vec2
result = Vec3(3.0, 0.25, 0.0)
self.assertEqual(vec3, result)
def testDivide(self):
vec1 = Vec3(3.0, 1.0, 2.0)
vec2 = Vec3(1.0, 0.25, 1.0)
vec3 = vec1 / vec2
result = Vec3(3.0, 4.0, 2.0)
self.assertEqual(vec3, result)
def testClone(self):
vec1 = Vec3(3.0, 1.0, 2.0)
vec2 = vec1.clone()
self.assertIsNot(vec1, vec2)
self.assertEqual(vec1, vec2)
def testSet(self):
vec = Vec3()
vec.set(0.25, -1.05, 0.0)
self.assertEquals(vec.x, 0.25)
self.assertEquals(round(vec.y, 2), -1.05)
self.assertEquals(vec.z, 0.0)
def testSetNull(self):
vec = Vec3(1.0, 2.0, 3.0)
vec.setNull()
self.assertEquals(vec, Vec3())
def testAlmostEqualWithPrecision(self):
vec1 = Vec3(1.01, 2.0, 0.0)
vec2 = Vec3(1.0, 2.0, 0.0)
result = vec1.almostEqualWithPrecision(vec2, 0.1)
self.assertTrue(result)
def testAlmostEqual(self):
vec1 = Vec3(1.000001, 2.0, 0.0)
vec2 = Vec3(1.0, 2.0, 0.0)
self.assertTrue(vec1.almostEqual(vec2))
def testComponent(self):
vec = Vec3(1.0, 2.0, 3.0)
self.assertEquals(vec.component(0), 1.0)
self.assertEquals(vec.component(1), 2.0)
self.assertEquals(vec.component(2), 3.0)
def testSetComponent(self):
vec = Vec3()
vec.setComponent(0, 1.0)
vec.setComponent(1, 2.0)
vec.setComponent(2, 3.0)
self.assertEquals(vec.x, 1.0)
self.assertEquals(vec.y, 2.0)
self.assertEquals(vec.z, 3.0)
def testMultiplyScalar(self):
vec = Vec3(1.0, 1.0, 1.0)
result = vec.multiplyScalar(3)
self.assertEquals(result.x, 3.0)
self.assertEquals(result.y, 3.0)
self.assertEquals(result.z, 3.0)
def testDivideScalar(self):
vec = Vec3(1.0, 1.0, 1.0)
result = vec.divideScalar(2)
self.assertEquals(result.x, 0.5)
self.assertEquals(result.y, 0.5)
self.assertEquals(result.z, 0.5)
def testNegate(self):
vec = Vec3(3.0, 4.0, 5.0)
result = vec.negate()
self.assertEquals(result.x, -3.0)
self.assertEquals(result.y, -4.0)
self.assertEquals(result.z, -5.0)
def testInverse(self):
vec = Vec3(3.0, 4.0, 10.0)
result = vec.inverse()
self.assertEquals(result.x, 0.3333333432674408)
self.assertEquals(result.y, 0.25)
self.assertEquals(round(result.z, 2), 0.1)
def testDot(self):
vec1 = Vec3(0.0, 1.0, 0.0)
vec2 = Vec3(1.0, 0.0, 0.0)
result = vec1.dot(vec2)
self.assertEqual(result, 0.0)
def testCross(self):
vec1 = Vec3(0.0, 1.0, 0.0)
vec2 = Vec3(1.0, 0.0, 0.0)
result = vec1.cross(vec2)
self.assertEqual(result, Vec3(0.0, 0.0, -1.0))
def testLengthSquared(self):
vec = Vec3(2.0, 2.0, 2.0)
result = vec.lengthSquared()
self.assertEquals(result, 12.0)
def testLength(self):
vec = Vec3(1.0, 1.0, 1.0)
result = vec.length()
self.assertEquals(result, 1.7320507764816284)
def testUnit(self):
vec = Vec3(1.5, 3.0, 1.0)
result = vec.unit()
self.assertEquals(result, Vec3(0.428571432829, 0.857142865658, 0.285714298487))
def testUnit_safe(self):
vec = Vec3(0.001, 0.001, 0.001)
result = vec.unit_safe()
self.assertEquals(result, Vec3(0.577350258827, 0.577350258827, 0.577350258827))
def testSetUnit(self):
vec = Vec3(0.001, 0.001, 0.001)
result = vec.setUnit()
self.assertEquals(result, 0.0017320509068667889)
def testNormalize(self):
vec = Vec3(0.001, 0.001, 0.001)
vec.normalize()
self.assertEquals(vec, Vec3(0.577350258827, 0.577350258827, 0.577350258827))
def testClamp(self):
vec = Vec3(1.5, 3.0, 2.6)
result = vec.clamp(Vec3(0.25, 0.5, 0.75), Vec3(1.75, 2.75, 3.75))
self.assertEquals(result, Vec3(1.5, 2.75, 2.6))
def testUnitsAngleTo(self):
vec1 = Vec3(0.0, 1.0, -1.0).unit()
vec2 = Vec3(1.0, 0.0, 2.0).unit()
result = vec1.unitsAngleTo(vec2)
self.assertEquals(result, 2.2555155754089355)
def testAngleTo(self):
vec1 = Vec3(0.0, 1.0, -1.0)
vec2 = Vec3(1.0, 0.0, 2.0)
result = vec1.angleTo(vec2)
self.assertEquals(result, 2.2555155754089355)
def testDistanceTo(self):
vec1 = Vec3(0.0, 0.0, 0.0)
vec2 = Vec3(0.0, 1.0, 0.0)
result = vec1.distanceTo(vec2)
self.assertEquals(result, 1.0)
def testLinearInterpolate(self):
vec1 = Vec3(0.0, 0.0, 0.0)
vec2 = Vec3(0.0, 1.0, 0.0)
result = vec1.linearInterpolate(vec2, 0.5)
self.assertEquals(result, Vec3(0.0, 0.5))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(TestVec3)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
import json
import requests
from .Exceptions import *
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class admin(object):
"""Administrative functions"""
def _login(self):
"""
Obtain a token from the server for future requests
:return: Token
"""
login_url = '/api/admin/login'
full_url = self._url_builder_no_token(login_url)
resp = methods.post(full_url, self.sess, data={'username': self.uname, 'password': self.passwd})
login_token_dict = resp.json()
return login_token_dict['token']
def getPermToken(self):
"""
Get the permanent token for the server
:return: Permanent token
"""
perm_token_url = '/api/admin/permanenttoken'
return utilties._getURL(self, perm_token_url)
def shutdownServer(self):
"""
Shutdown the rest server
:return: dict
"""
shutdown_url = '/api/admin/shutdown'
return utilties._getURL(self, shutdown_url)
# full_url = self._url_builder(shutdown_url)
# resp = methods.get(full_url, self.sess)
# return resp.json()
def restartServer(self):
"""
Restart RESTFul server
:return: dict
"""
restart_url = '/api/admin/restart'
return utilties._getURL(self, restart_url)
# full_url = self._url_builder(restart_url)
# resp = methods.get(full_url, self.sess)
# return resp.json()
class utilties(object):
"""Utility HTTP methods"""
def check_version(self):
"""
Check the version of Empire
        :return: Version number
        :rtype: dict
"""
version_url = '/api/version'
return self._getURL(version_url)
# full_url = self._url_builder(version_url)
# resp = methods.get(full_url, self.sess)
# return resp.json()
def getMap(self):
"""
Get API map from server.
:return: dict
"""
map_url = '/api/map'
return self._getURL(map_url)
# full_url = self._url_builder(map_url)
# resp = methods.get(full_url, self.sess)
# return resp.json()
def getConfig(self):
"""
Get configuration of current server
:return: dict
"""
config_url = '/api/config'
return self._getURL(config_url)
# full_url = self._url_builder(config_url)
# resp = methods.get(full_url, self.sess)
# return resp.json()
def getCreds(self):
"""
Get the credentials stored in the database
:return: dict
"""
full_url = '/api/creds'
return self._getURL(full_url)
def _checkToken(self):
"""
        Check that the token provided at construction is valid by requesting
        the Empire version
        :raises: HTTPError.UnAuthorized if the token is rejected
"""
# Check the version of Empire; no news is good news.
resp = utilties.check_version(self)
def _getURL(self, url):
"""
Base for simple GET requests
:param url:
:return:
"""
full_url = self._url_builder(url)
resp = methods.get(full_url, self.sess)
return resp.json()
def _postURL(self, url, payload=None):
"""
        Base for simple POST requests
        :param url:
        :param payload:
:rtype: dict
"""
full_url = self._url_builder(url)
resp = methods.post(full_url, self.sess, data=payload)
return resp.json()
def _delURL(self, url):
"""
Make DELETE request
:param url:
:rtype: dict
"""
full_url = self._url_builder(url)
resp = methods.del_req(full_url, self.sess)
return resp.json()
class reporting(object):
"""Class to hold all the report endpoints"""
def report(self):
"""
Return all logged events
:return: dict
"""
full_url = '/api/reporting'
return utilties._getURL(self, full_url)
def report_agent(self, agent_id):
"""
Get all logged events for a specific agent
:param agent_id: Agent name
:type agent_id: str
:return: dict
"""
full_url = '/api/reporting/agent/{}'.format(agent_id)
return utilties._getURL(self, full_url)
def report_type(self, type_id):
"""
        Get all logged events of a specific type. Only the event types checkin, task, result, and rename are accepted.
:param type_id: Event type as string
:type type_id: str
:return: dict
"""
valid_type = ['checkin', 'task', 'result', 'rename']
if type_id in valid_type:
full_url = '/api/reporting/type/{}'.format(type_id)
return utilties._getURL(self, full_url)
else:
raise InvalidLoggingType('The event type {} does not exist.'.format(type_id)) from None
def report_msg(self, msg_str):
"""
Return all logged events matching message Z, wildcards accepted
:param msg_str: Message to search for
:type msg_str: str
:return: dict
"""
full_url = '/api/reporting/msg/{}'.format(msg_str)
return utilties._getURL(self, full_url)
class stagers(object):
def get_stagers(self):
"""
Return all current stagers
:return: dict
"""
        full_url = '/api/stagers'
return utilties._getURL(self, full_url)
def get_stager_by_name(self, name):
"""
Get stager by name
:param name: Name of stager to return
:return: dict
"""
full_url = '/api/stagers/{}'.format(name)
return utilties._getURL(self, full_url)
def gen_stager(self, StagerName, listener, **kwargs):
"""
Generate a stager
:param StagerName: Name of stager to call
:param Listener: Name of valid listener
:param kwargs: Other options
:return: dict
"""
full_url = '/api/stagers'
full_url = self._url_builder(full_url)
payload = {'Listener': listener, 'StagerName': StagerName}
return methods.post(full_url, self.sess, data=payload).json()
class modules(object):
def modules(self):
"""
All current modules
:return:
"""
full_url = '/api/modules'
return utilties._getURL(self, full_url)
def module_by_name(self, name):
"""
Return all modules with specified name
:param name: Name of stager
:return: dict
"""
full_url = '/api/modules/{}'.format(name)
return utilties._getURL(self, full_url)
def module_exec(self, name, options):
"""
Execute the given module with the specified options
Requires Agent to be in options
:param options: Dictionary of module options
:type options: dict
:rtype: dict
"""
full_url = '/api/modules/{}'.format(name)
return utilties._postURL(self, full_url, options)
def module_search(self, srch_str):
"""
Search modules for passed term
:param srch_str: Search term
:type srch_str: str
:rtype: dict
"""
full_url = '/api/modules/search'
data = {'term': srch_str}
return utilties._postURL(self, full_url, data)
def module_search_name(self, mod_name):
"""
Searches module names for a specific term
        :type mod_name: str
:rtype: dict
"""
# Takes {'term':'desc'}
full_url = '/api/modules/search/modulename'
data = {'term': mod_name}
return utilties._postURL(self, full_url, data)
def module_search_desc(self, desc):
"""
Searches module descriptions for a specific term
        :type desc: str
:rtype: dict
"""
# Takes {'term':'desc'}
full_url = '/api/modules/search/description'
data = {'term': desc}
return utilties._postURL(self, full_url, data)
def module_search_comment(self, comment):
"""
Searches module comments for a specific term
:type comment: str
:rtype: dict
"""
# Takes {'term':'desc'}
full_url = '/api/modules/search/comments'
data = {'term': comment}
return utilties._postURL(self, full_url, data)
def module_search_author(self, author):
"""
Searches module authors for a specific term
:type author: str
:return:
"""
full_url = '/api/modules/search/author'
        data = {'term': author}
return utilties._postURL(self, full_url, data)
class agents(object):
def agents(self):
"""
Return a list of all agents
:return: dict
"""
full_url = '/api/agents'
return utilties._getURL(self, full_url)
def agents_stale(self):
"""
Return a list of stale agents
:rtype: dict
"""
full_url = '/api/agents/stale'
return utilties._getURL(self, full_url)
def agents_del_stale(self):
"""
Delete stale agents
:rtype: dict
"""
full_url = '/api/agents/stale'
return utilties._delURL(self, full_url)
def agents_remove(self, name):
"""
Remove agents from database
:rtype: dict
"""
full_url = '/api/agents/{}'.format(name)
return utilties._delURL(self, full_url)
def agent_info(self, name):
"""
Returns JSON describing the agent specified by name.
:param name:
:rtype: dict
"""
full_url = '/api/agents/{}'.format(name)
return utilties._getURL(self, full_url)
def agent_shell_buffer(self, agent_name):
"""
Return tasking results for the agent
:param agent_name: Agent name as string
:rtype: dict
"""
final_url = '/api/agents/{}/results'.format(agent_name)
return utilties._getURL(self, final_url)
def agent_run_shell_cmd(self, agent_name, options):
"""
        Task agent to run a shell command
:param agent_name: Agent name
:param options: Dict of command
:rtype: dict
"""
final_url = '/api/agents/{}/shell'.format(agent_name)
return utilties._postURL(self, final_url, payload=options)
def agent_rename(self, current_name, new_name):
"""
Renames the specified agent
:param current_name:
:param new_name:
:return:
"""
# Takes {'newname':'NAME'}
final_url = '/api/agents/{}/rename'.format(current_name)
        options = {'newname': new_name}
return utilties._postURL(self, final_url, payload=options)
def agent_clear_buff(self, name):
"""
Clears the tasking buffer for the specified agent
:rtype: dict
"""
final_url = '/api/agents/{}/clear'.format(name)
return utilties._getURL(self, final_url)
def agent_kill(self, name):
"""
Tasks the specified agent to exit
:rtype: dict
"""
final_url = '/api/agents/{}/kill'.format(name)
return utilties._getURL(self, final_url)
class empireAPI(utilties, admin, reporting, stagers, modules, agents):
def __init__(self, host, port=1337, verify=False, token=None, uname=None, passwd=None):
"""
Information for the start of the class. You must include either a token or a username and password
:param host: IP or domain name to connect to
:param port: Port to connect to
:param verify: Requests verify the SSL chain
:param token: Token to authenticate with
:param uname: Username to authenticate with
:param passwd: Password to authenticate with
"""
# No parameters provided
if token is None and uname is None and passwd is None:
raise NoAuthenticationProvided('No authentication was provided.')
        elif token is None and (uname is None or passwd is None):  # No token and at least one of uname/passwd is missing
raise NoAuthenticationProvided('Incomplete authentication provided.')
# Check if host starts with 'https://' or 'http://'
if not (host.startswith('https://') or host.startswith('http://')):
# Append 'https:// to the beginning of the host
host = 'https://{}'.format(host)
self.host = host
self.port = port
self.verify = verify
self.token = token
self.uname = uname
self.passwd = passwd
# We should have all of the information needed now to open a connection
# Other variables to use
self.perm_token = None
# Create the session for Requests and consistency
self.sess = requests.Session()
        self.sess.verify = verify
self.sess.headers = {'Content-Type': 'application/json'}
# If token is provided, check the version to make sure it works
if token is not None:
self._checkToken()
else:
# If username and password are provided, get a token
self.token = admin._login(self)
def _url_builder(self, resource_location):
"""
Builds the complete URI
:param resource_location: Leading slash all the way to but not including the ?
:return: URI in a string.
"""
url = '{base}:{port}{location}?token={token}'.format(base=self.host, port=self.port,
location=resource_location, token=self.token)
return url
def _url_builder_no_token(self, resource_location):
"""
Builds a URL without a token parameter at the end
:param resource_location: Leading slash all the way to but not including the ?
:return: URI in a string.
"""
return '{base}:{port}{location}'.format(base=self.host, port=self.port, location=resource_location)
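    # For example (values hypothetical): with host='https://10.0.0.5',
    # port=1337 and token='abc123', _url_builder('/api/version') returns
    # 'https://10.0.0.5:1337/api/version?token=abc123', while
    # _url_builder_no_token('/api/admin/login') returns
    # 'https://10.0.0.5:1337/api/admin/login'.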
class methods:
"""All HTTP methods in use"""
@staticmethod
def httpErrors(resp):
status_code = resp.status_code
if status_code == 400:
# Bad Request
raise HTTPError.BadRequest(resp.json()['error']) from None
elif status_code == 401:
# Unauthorized
raise HTTPError.UnAuthorized(resp.json()['error']) from None
elif status_code == 405:
raise HTTPError.MethodNotAllowed(resp.json()['error']) from None
elif status_code != 200:
raise HTTPError.UnKnownHTTPError(resp.json()['error']) from None
@staticmethod
def get(url, sess):
"""Make a GET request"""
r = sess.get(url)
# Check for errors
methods.httpErrors(r)
# No news is good news
return r
@staticmethod
def post(url, sess, data=None):
"""Make a POST request"""
# dumps is there to ensure the data is properly formatted
r = sess.post(url, data=json.dumps(data))
# Check for errors
methods.httpErrors(r)
# No news is good news
return r
@staticmethod
def del_req(url, sess):
"""Make DELETE request"""
r = sess.delete(url)
# Check for errors
methods.httpErrors(r)
# No news is good news
return r
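def _example_usage():
    # Usage sketch (illustrative only; the host, credentials and listener name
    # below are hypothetical): authenticate once, then reuse the wrapper for
    # any endpoint defined above.
    api = empireAPI('10.0.0.5', uname='empireadmin', passwd='Password123!')
    print(api.getMap())
    return api.gen_stager('multi/launcher', 'http_listener')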
|
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Supports interface for looking at and reviewing previously cut a globes."""
import os
from common import portable_exceptions
import common.globe_directory
import common.portable_globe
import common.utils
from core import globe_info
WEB_URL_BASE = "/cutter/globes"
BASE_DIR = "../htdocs"
GLOBE_DIR = "%s%s" % (BASE_DIR, WEB_URL_BASE)
# Off by default. Set to desired file if needed.
LOG_FILE = "/tmp/gee_globe_info_log"
# Template for json used to return info about globe being served.
GLOBE_INFO_OBJ_TEMPLATE = """{
"name":"%s",
"timestamp":"%s",
"size":"%s",
"size_in_bytes":%s,
"description":"%s",
"path":"%s",
"is_gee":%s,
"is_2d":%s,
"is_3d":%s,
"has_polygon":%s,
"is_mercator":%s,
"is_being_served":%s
}"""
# Template for json used to return info about bad globe file.
BAD_FILE_INFO_OBJ_TEMPLATE = """{
"name":"%s",
"size":"%s",
"size_in_bytes":%s,
"path":"%s",
"is_gee":%s,
"message":"%s"
}"""
TABLE_ENTRY_TEMPLATE = ("<tr>"
" <td width='100px' align='right'>"
" <a href='%%s'>[$SHORT_NAME]</a></td>"
" <td>[$SIZE]</td>"
" <td><i>[$TIMESTAMP]</i></td>"
" <td>[$DESCRIPTION]</td>"
"</tr>")
def main():
logger = common.utils.Log(LOG_FILE, False)
try:
logger.Log(os.environ["REQUEST_URI"])
params = os.environ["REQUEST_URI"].split("/")
cmd = params[3]
server_name = os.environ["SCRIPT_URI"]
server_name = server_name[:server_name.find("/cgi-bin")]
try:
globe_name = params[4]
request = params[5].split("?")[0]
arguments = os.environ["REQUEST_URI"].split("?")[1]
except IndexError:
request = ""
if cmd == "serve":
globe = common.portable_globe.Globe("%s/%s" % (GLOBE_DIR, globe_name))
if globe.IsGee():
server = globe_info.Server(globe, logger)
logger.Log("Request: %s" % request)
if request == "flatfile":
server.FlatFileHandler(arguments)
elif request == "dbRoot.v5":
server.DbRootHandler()
elif request == "query":
server.QueryHandler(arguments)
else:
raise portable_exceptions.PortableException(
"Unknown request: %s" % request)
else:
raise portable_exceptions.PortableException(
"Corrupted glx: %s." % globe_name)
elif cmd == "dbroot_info":
common.utils.WriteHeader("application/json")
globe = common.portable_globe.Globe("%s/%s" % (GLOBE_DIR, globe_name))
print common.utils.GetDbrootInfoJson(globe, globe_name)
elif cmd == "list_globe_dir":
common.utils.WriteHeader("text/html")
globe_dir = common.globe_directory.GlobeDirectory(GLOBE_DIR, True)
content = common.utils.GlobesToText(
globe_dir.globes_, TABLE_ENTRY_TEMPLATE, "name")
print ("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN"
"\"\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'>")
print "<html><body>"
print "<b>List of globes in</b> <i>%s</i>:" % GLOBE_DIR
print "<table cellspacing=10>"
print content
print "</table>"
print "</body></html>"
elif cmd == "info":
globe = common.portable_globe.Globe("%s/%s" % (GLOBE_DIR, globe_name))
if globe.IsGee():
content = globe.ReadFile("earth/info.txt")
common.utils.WriteHeader("text/html")
print ("<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN"
"\"\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'>")
print "<html><body><h2>Globe info for %s</h2><pre>" % globe_name
print content
print "</pre></body></html>"
else:
raise portable_exceptions.PortableException(
"Corrupted glx: %s." % globe_name)
elif cmd == "help":
common.utils.WriteHeader("text/plain")
print params
print os.environ
print params
print "/".join(params[5:])
elif cmd == "polygon":
globe_dir = common.globe_directory.GlobeDirectory(GLOBE_DIR, True)
polygon = globe_dir.globes_[globe_name]["globe"].Polygon()
common.utils.WriteHeader("text/plain")
if polygon.find("<kml") >= 0:
print polygon
elif cmd == "preview":
common.utils.WriteHeader("text/html")
if globe_name[-4:] == ".glb":
params = common.utils.GlobeNameReplaceParams(globe_name)
params["[$URL]"] = "%s/glc/%s" % (server_name, globe_name)
common.utils.OutputFile(
"%s/cutter/preview_globe.html" % BASE_DIR, params)
elif globe_name[-4:] == ".glc":
params = common.utils.GlobeNameReplaceParams(globe_name)
params["[$URL]"] = "%s/glc/%s/a" % (server_name, globe_name)
common.utils.OutputFile(
"%s/cutter/preview_glc.html" % BASE_DIR, params)
else:
common.utils.OutputFile(
"%s/cutter/preview_map.html" % BASE_DIR,
common.utils.GlobeNameReplaceParams(globe_name))
elif cmd == "globes":
globe_dir = common.globe_directory.GlobeDirectory(GLOBE_DIR, True)
common.utils.WriteHeader("text/plain")
globe_info_list = []
for globe_key in globe_dir.globes_.iterkeys():
globe = globe_dir.globes_[globe_key]
if globe["is_gee"]:
globe_info_list.append(GLOBE_INFO_OBJ_TEMPLATE % (
globe["name"], globe["timestamp"], globe["size"],
globe["size_in_bytes"], globe["description"], globe["path"],
common.utils.JsBoolString(globe["is_gee"]),
common.utils.JsBoolString(globe["is_2d"]),
common.utils.JsBoolString(globe["is_3d"]),
common.utils.JsBoolString(globe["has_polygon"]),
common.utils.JsBoolString(globe["is_mercator"]),
common.utils.JsBoolString(globe["is_being_served"])))
else:
globe_info_list.append(BAD_FILE_INFO_OBJ_TEMPLATE % (
globe["name"], globe["size"],
globe["size_in_bytes"], globe["path"],
common.utils.JsBoolString(globe["is_gee"]),
globe["message"]))
print "["
print ",\n".join(globe_info_list)
print "]"
else:
common.utils.WriteHeader("text/plain")
logger.Log("Unknown command: %s" % cmd)
print "Unknown command:", cmd
except Exception, e:
common.utils.WriteHeader("text/html")
print type(e), e
if __name__ == "__main__":
main()
|
|
import sys
import unittest
import io
import atexit
import os
from test import support
from test.support import script_helper
### helpers
def h1():
print("h1")
def h2():
print("h2")
def h3():
print("h3")
def h4(*args, **kwargs):
print("h4", args, kwargs)
def raise1():
raise TypeError
def raise2():
raise SystemError
def exit():
raise SystemExit
class GeneralTest(unittest.TestCase):
def setUp(self):
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.stream = io.StringIO()
sys.stdout = sys.stderr = self.stream
atexit._clear()
def tearDown(self):
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
atexit._clear()
def test_args(self):
# be sure args are handled properly
atexit.register(h1)
atexit.register(h4)
atexit.register(h4, 4, kw="abc")
atexit._run_exitfuncs()
self.assertEqual(self.stream.getvalue(),
"h4 (4,) {'kw': 'abc'}\nh4 () {}\nh1\n")
def test_badargs(self):
atexit.register(lambda: 1, 0, 0, (x for x in (1,2)), 0, 0)
self.assertRaises(TypeError, atexit._run_exitfuncs)
def test_order(self):
# be sure handlers are executed in reverse order
atexit.register(h1)
atexit.register(h2)
atexit.register(h3)
atexit._run_exitfuncs()
self.assertEqual(self.stream.getvalue(), "h3\nh2\nh1\n")
def test_raise(self):
# be sure raises are handled properly
atexit.register(raise1)
atexit.register(raise2)
self.assertRaises(TypeError, atexit._run_exitfuncs)
def test_raise_unnormalized(self):
# Issue #10756: Make sure that an unnormalized exception is
# handled properly
atexit.register(lambda: 1 / 0)
self.assertRaises(ZeroDivisionError, atexit._run_exitfuncs)
self.assertIn("ZeroDivisionError", self.stream.getvalue())
def test_exit(self):
# be sure a SystemExit is handled properly
atexit.register(exit)
self.assertRaises(SystemExit, atexit._run_exitfuncs)
self.assertEqual(self.stream.getvalue(), '')
def test_print_tracebacks(self):
# Issue #18776: the tracebacks should be printed when errors occur.
def f():
1/0 # one
def g():
1/0 # two
def h():
1/0 # three
atexit.register(f)
atexit.register(g)
atexit.register(h)
self.assertRaises(ZeroDivisionError, atexit._run_exitfuncs)
stderr = self.stream.getvalue()
self.assertEqual(stderr.count("ZeroDivisionError"), 3)
self.assertIn("# one", stderr)
self.assertIn("# two", stderr)
self.assertIn("# three", stderr)
def test_stress(self):
a = [0]
def inc():
a[0] += 1
for i in range(128):
atexit.register(inc)
atexit._run_exitfuncs()
self.assertEqual(a[0], 128)
def test_clear(self):
a = [0]
def inc():
a[0] += 1
atexit.register(inc)
atexit._clear()
atexit._run_exitfuncs()
self.assertEqual(a[0], 0)
def test_unregister(self):
a = [0]
def inc():
a[0] += 1
def dec():
a[0] -= 1
for i in range(4):
atexit.register(inc)
atexit.register(dec)
atexit.unregister(inc)
atexit._run_exitfuncs()
self.assertEqual(a[0], -1)
def test_bound_methods(self):
l = []
atexit.register(l.append, 5)
atexit._run_exitfuncs()
self.assertEqual(l, [5])
atexit.unregister(l.append)
atexit._run_exitfuncs()
self.assertEqual(l, [5])
def test_shutdown(self):
# Actually test the shutdown mechanism in a subprocess
code = """if 1:
import atexit
def f(msg):
print(msg)
atexit.register(f, "one")
atexit.register(f, "two")
"""
res = script_helper.assert_python_ok("-c", code)
self.assertEqual(res.out.decode().splitlines(), ["two", "one"])
self.assertFalse(res.err)
@support.cpython_only
class SubinterpreterTest(unittest.TestCase):
def test_callbacks_leak(self):
# This test shows a leak in refleak mode if atexit doesn't
# take care to free callbacks in its per-subinterpreter module
# state.
n = atexit._ncallbacks()
code = r"""if 1:
import atexit
def f():
pass
atexit.register(f)
del atexit
"""
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertEqual(atexit._ncallbacks(), n)
def test_callbacks_leak_refcycle(self):
# Similar to the above, but with a refcycle through the atexit
# module.
n = atexit._ncallbacks()
code = r"""if 1:
import atexit
def f():
pass
atexit.register(f)
atexit.__atexit = atexit
"""
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertEqual(atexit._ncallbacks(), n)
def test_callback_on_subinterpreter_teardown(self):
# This tests if a callback is called on
# subinterpreter teardown.
expected = b"The test has passed!"
r, w = os.pipe()
code = r"""if 1:
import os
import atexit
def callback():
os.write({:d}, b"The test has passed!")
atexit.register(callback)
""".format(w)
ret = support.run_in_subinterp(code)
os.close(w)
self.assertEqual(os.read(r, len(expected)), expected)
os.close(r)
if __name__ == "__main__":
unittest.main()
|
|
"""
This script contains code to create the img() and ds005() classes, which will
automatically complete all the grunt work required for a set of data before
statistical analyses can be performed on it. Future Python scripts can take
advantage of the img() and ds005() classes by including the command
sys.path.append("code/utils")
from make_class import *
"""
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import os, sys
from scipy.ndimage.filters import gaussian_filter
sys.path.append("code/utils")
from hrf import *
class img(object):
"""
This class organizes each file containing fMRI data and provides a quick way
to extract crucial information necessary for later statistical analyses.
"""
def __init__(self, file_path):
"""
Each object of this class created will contain the fMRI data that comes
from a single file. While keeping the original image, it also saves
critical attributes attached to the image for easy access. This class is
meant to be used exclusively within the ds005() class.
Parameters
----------
file_path : str
Path leading from the main project directory to the file containing
the fMRI BOLD signal data of interest
"""
# Load the fMRI image saved to the specified file
assert os.path.isfile(file_path), "nonexistent file for subject/run"
self.img = nib.load(file_path)
# Extract the BOLD data enclosed within the image
self.data = self.img.get_data()
# Extract the affine of the fMRI image
self.affine = self.img.affine
        # Compute voxels per mm (the reciprocal of the affine's mm-per-voxel sizes)
mm_per_voxel = abs(self.affine.diagonal()[:3])
self.voxels_per_mm = np.append(np.reciprocal(mm_per_voxel), 0)
def smooth(self, fwhm=5):
"""
        Returns the BOLD data after application of a Gaussian filter with a
        standard deviation derived from `fwhm`
Parameters
----------
fwhm : float or np.ndarray(..., dtype=float), optional
Millimeter measurement of the full-width-at-half-maximum of the
Gaussian distribution whose kernel will be used in smoothing. If
np.ndarray(), shape must be (4,)
Return
------
smooth_data : np.ndarray
Array of shape self.data.shape
"""
if type(fwhm) == np.ndarray:
assert fwhm.shape == (4,), "invalid shape in fwhm"
assert fwhm.dtype in ["float_", "int_"], "invalid dtype in fwhm"
else:
assert type(fwhm) in [float, int], "invalid type in fwhm"
sigma_in_voxels = fwhm / np.sqrt(8 * np.log(2)) * self.voxels_per_mm
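        # For example, the default fwhm of 5 mm corresponds to
        # sigma = 5 / sqrt(8 * ln(2)) ~= 2.12 mm, which voxels_per_mm then
        # converts into voxel units for gaussian_filter.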
smooth_data = gaussian_filter(self.data, sigma_in_voxels)
return smooth_data
class ds005(object):
"""
This class allows organization of the data by runs. In addition to the
behavioral data, it also contains as subobjects the raw and filtered data.
"""
def __init__(self, sub_id, run_id, rm_nonresp=True):
"""
Each object of this class created contains both sets of fMRI data along
with the corresponding behavioral data.
Parameters
----------
sub_id : str
Unique key used to identify the subject (i.e., 001, ..., 016)
run_id : str
            Unique key used to identify the run number (i.e., 001, ..., 003)
rm_nonresp : bool, optional
True removes trials that resulted in subject nonresponse
"""
# Save parts of the paths to the directories containing the data
path_sub = "data/ds005/sub%s/" % sub_id
path_run = "task001_run%s" % run_id
# Extract subject's behavioral data for the specified run
path_behav = path_sub + "behav/" + path_run + "/behavdata.txt"
        # Read in all but the first line, which is just a header.
raw = np.array([row.split() for row in list(open(path_behav))[1:]])
kept_rows = raw[:, 4] != "0" if rm_nonresp else np.arange(raw.shape[0])
rare = raw[kept_rows].astype("float")
# Calculate the distance to indifference--defined to be the euclidean
# distance from the gain/loss combination to the diagonal of the
# gain/loss matrix
gain, loss = rare[:, 1], rare[:, 2].astype(int)
gains = np.arange(10, 41, 2)
# The euclidean distance of a point from the diagonal is the length of
# the vector intersecting that point and orthogonal to the diagonal.
# Take the gain/loss combination to be one vertex of an isosceles right
# triangle. Then (`loss` - 5) gives the index of the gain in `gains` of
# the point that lies both on the diagonal and on the orthogonal vector
# defined above. Half the absolute value of the difference between the
# observed `gain` and this calculated gain (because `gains` increments
# by two) is the length of one leg of our triangle. We can then proceed
# to use this leg to calculate the triangle's hypotenuse, which then
# gives the perpendicular distance of the point to the diagonal.
rare[:, 3] = abs(gain - gains[loss - 5]) / np.sqrt(8)
self.behav = rare
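        # Worked example: a trial with gain = 20 and loss = 5 sits at grid
        # indices ((20 - 10) / 2, 5 - 5) = (5, 0), so its perpendicular distance
        # to the diagonal is |5 - 0| / sqrt(2) ~= 3.54, which is exactly
        # abs(20 - gains[5 - 5]) / sqrt(8) = 10 / sqrt(8).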
# Extract subject's task condition data
path_cond = path_sub + "model/model001/onsets/" + path_run
conditions = ()
for condition in range(2, 5):
raw_matrix = list(open(path_cond + "/cond00%s.txt" % condition))
cond = np.array([row.split() for row in raw_matrix]).astype("float")
conditions += (cond,)
self.cond_gain, self.cond_loss, self.cond_dist2indiff = conditions
# Load raw and filtered fMRI images
self.raw = img(path_sub + "BOLD/" + path_run + "/bold.nii.gz")
self.filtered = img(path_sub + "model/model001/" + path_run +
".feat/filtered_func_data_mni.nii.gz")
def design_matrix(self, gain=True, loss=True, dist2indiff=True,
resp_time=False):
"""
Creates the design matrix from the object's stored behavioral data.
Parameters
----------
gain : bool, optional
True includes as a regressor parametric gains
loss : bool, optional
True includes as a regressor parametric losses
dist2indiff : bool, optional
True includes the regressor distance to indifference
resp_time : bool, optional
True includes as a regressor subject response time
Return
------
design_matrix : np.ndarray
Design matrix from subjects' behavioral data with a column for each
desired regressor and a row for each desired trial
"""
# Determine which columns of behav to consider as regressors
columns = [False, gain, loss, dist2indiff, False, False, resp_time]
n_regressors = columns.count(True) + 1
design_matrix = np.ones((self.behav.shape[0], n_regressors))
design_matrix[:, 1:n_regressors] = self.behav[:, np.array(columns)]
return design_matrix
def time_course(self, regressor, step_size=2):
"""
Generates predictions for the neural time course, with respect to a
regressor.
Parameters
----------
regressor : str
Name of regressor whose amplitudes will be used to generate the
time course: select from "gain", "loss", "dist2indiff"
step_size : float, optional
Size of temporal steps (in seconds) at which to generate predictions
Return
------
time_course : np.ndarray
1-D numpy array, containing 0s for time between trials and values
defined by the specified regressor for time during trials
"""
assert regressor in ["gain", "loss", "dist2indiff"], "invalid regressor"
condition = {"gain": self.cond_gain, "loss": self.cond_loss,
"dist2indiff": self.cond_dist2indiff}[regressor]
onsets = condition[:, 0] / step_size
periods, amplitudes = condition[:, 1] / step_size, condition[:, 2]
# The default time resolution in this study was two seconds
time_course = np.zeros(int(2 * self.raw.data.shape[3] / step_size))
for onset, period, amplitude in list(zip(onsets, periods, amplitudes)):
onset, period = int(np.floor(onset)), int(np.ceil(period))
time_course[onset:(onset + period)] = amplitude
return time_course
def convolution(self, regressor, step_size=2):
"""
Computes the predicted convolved hemodynamic response function signals
for a given regressor.
Parameters
----------
regressor : str
Name of the regressor whose predicted neural time course and whose
hemodynamic response function will be convolved: select from "gain",
"loss", "dist2indiff"
step_size : float
Size of temporal steps (in seconds) at which to generate signals
Return
------
convolution : np.ndarray
Array containing the predicted hemodynamic response function values
for the given regressor
"""
time_course = self.time_course(regressor, step_size)
# Hemodynamic responses typically last 30 seconds
hr_func = hrf(np.arange(0, 30, step_size))
convolution = np.convolve(time_course, hr_func)[:len(time_course)]
return convolution
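if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): it assumes the ds005 dataset
    # has been downloaded to data/ds005/ with the directory layout expected
    # above, and is skipped otherwise.
    if os.path.isdir("data/ds005/sub001"):
        run = ds005("001", "001")
        X = run.design_matrix(gain=True, loss=True, dist2indiff=True)
        print("Design matrix shape:", X.shape)
        gain_signal = run.convolution("gain", step_size=2)
        print("Convolved gain regressor length:", len(gain_signal))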
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file runs value iteration on an aggregated state space.
It aggregates states using the supplied metric.
This module will run a number of trials on a set of possible metrics and compile
the results in a plot.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import gin
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from six.moves import range
import tensorflow.compat.v1 as tf
def greedy(metric, num_states, num_states_target, max_iterations,
verbose=False):
"""Greedily aggregate states until a desired number of aggregate states.
Args:
metric: matrix of distances.
num_states: int, number of total states.
num_states_target: int, desired number of states.
max_iterations: int, maximum number of iterations to run algorithm.
verbose: bool, whether to print verbose messages.
Returns:
list of aggregated states and list mapping state to its cluster.
"""
curr_metric = np.copy(metric)
# First we ensure that we won't aggregate states with themselves.
np.fill_diagonal(curr_metric, np.inf)
aggregate_states = [[x] for x in range(num_states)]
state_to_aggregate_states = list(range(num_states))
num_iterations = 1
while len(aggregate_states) > num_states_target:
# Pick a pair of the closest states randomly.
min_distance = np.min(curr_metric)
# We add a little epsilon here to avoid floating point precision issues.
x, y = np.where(curr_metric <= min_distance + 1e-8)
i = np.random.randint(len(x))
s, t = x[i], y[i]
# So we no longer try to aggregate these states.
curr_metric[s, t] = np.inf
curr_metric[t, s] = np.inf
# For simplicity we'll put the new aggregation at the front.
c1 = state_to_aggregate_states[s]
c2 = state_to_aggregate_states[t]
new_aggregate_states = [[]]
for c in [c1, c2]:
for s in aggregate_states[c]:
if s in new_aggregate_states[0]:
# If c1 == c2, this would cause duplicates which causes never-ending
# loops.
continue
new_aggregate_states[0].append(s)
state_to_aggregate_states[s] = 0
# Re-index all the other aggregations.
for i, c in enumerate(aggregate_states):
if i == c1 or i == c2:
continue
for s in c:
state_to_aggregate_states[s] = len(new_aggregate_states)
new_aggregate_states.append(c)
aggregate_states = new_aggregate_states
if num_iterations % 1000 == 0 and verbose:
logging.info('Iteration %d', num_iterations)
num_iterations += 1
if num_iterations > max_iterations:
break
return aggregate_states, state_to_aggregate_states
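# Illustrative sketch (not part of the original module): greedy() expects a
# symmetric matrix of pairwise distances. For a hypothetical 4-state metric
#
#   metric = np.array([[0., 1., 5., 6.],
#                      [1., 0., 5., 6.],
#                      [5., 5., 0., 1.],
#                      [6., 6., 1., 0.]])
#   clusters, assignment = greedy(metric, num_states=4, num_states_target=2,
#                                 max_iterations=100)
#
# the closest pairs are merged first, yielding the two aggregate states
# {0, 1} and {2, 3}.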
def k_medians(metric, num_states, num_states_target, max_iterations,
verbose=False):
"""Aggregate states using the k-medians algorithm.
Args:
metric: matrix of distances.
num_states: int, number of total states.
num_states_target: int, desired number of states.
max_iterations: int, maximum number of iterations to run algorithm.
verbose: bool, whether to print verbose messages.
Returns:
    list of aggregated states and list mapping each state to its cluster.
"""
# Pick an initial set of centroids.
centroids = np.random.choice(num_states, size=num_states_target,
replace=False)
state_to_centroid = [0 for _ in range(num_states)]
for k, s in enumerate(centroids):
state_to_centroid[s] = k
# We first put each state in a random cluster.
for s in range(num_states):
if s in centroids:
continue
k = s % num_states_target
state_to_centroid[s] = k
clusters_changing = True
num_iterations = 1
while clusters_changing:
clusters_changing = False
clusters = [[x] for x in centroids]
for s in range(num_states):
if s in centroids:
continue
nearest_centroid = 0
smallest_distance = np.inf
for k, t in enumerate(centroids):
if metric[s, t] < smallest_distance:
smallest_distance = metric[s, t]
nearest_centroid = k
if nearest_centroid != state_to_centroid[s]:
clusters_changing = True
state_to_centroid[s] = nearest_centroid
clusters[nearest_centroid].append(s)
# Re-calculate centroids.
for k, c in enumerate(clusters):
min_avg_distance = np.inf
new_centroid = 0
for s in c:
avg_distance = 0.
for t in c:
avg_distance += metric[s, t]
avg_distance /= len(c)
if avg_distance < min_avg_distance:
min_avg_distance = avg_distance
new_centroid = s
centroids[k] = new_centroid
if num_iterations % 1000 == 0 and verbose:
logging.info('Iteration %d', num_iterations)
num_iterations += 1
if num_iterations > max_iterations:
break
return clusters, state_to_centroid
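# Note on the centroid update above: within each cluster the new centroid is
# the member with the smallest average distance to all cluster members
# (including itself, assuming a zero diagonal). For a cluster {0, 1, 2} with
# metric[0, 1] = 1 and metric[0, 2] = metric[1, 2] = 5, the averages are
# (0 + 1 + 5) / 3 for states 0 and 1 and (5 + 5 + 0) / 3 for state 2, so
# state 0 (the first minimiser) becomes the new centroid.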
@gin.configurable
def value_iteration(env, aggregate_states, tolerance=0.001, verbose=False):
r"""Run value iteration on the aggregate MDP.
This constructs a new MDP using the aggregate states as follows:
```
R(c, a) = 1/|c| * \sum_{s \in c} R(s, a)
P(c, a)(c') = 1/|c| * \sum_{s \in c}\sum_{s' \in c'} P(s, a)(s')
```
Args:
env: the original environment.
aggregate_states: list of aggregate states.
tolerance: float, maximum difference in value between successive
iterations. Once this threshold is past, computation stops.
verbose: bool, whether to print verbose messages.
Returns:
    numpy array of Q-values with shape (num_clusters, num_actions).
"""
num_clusters = len(aggregate_states)
transition_probs = np.zeros((num_clusters, env.num_actions, num_clusters))
rewards = np.zeros((num_clusters, env.num_actions))
for c1 in range(num_clusters):
for a in range(env.num_actions):
for s1 in aggregate_states[c1]:
rewards[c1, a] += env.rewards[s1, a]
for c2 in range(num_clusters):
for s2 in aggregate_states[c2]:
transition_probs[c1, a, c2] += env.transition_probs[s1, a, s2]
rewards[c1, a] /= len(aggregate_states[c1])
transition_probs[c1, a, :] /= len(aggregate_states[c1])
q_values = np.zeros((num_clusters, env.num_actions))
error = tolerance * 2.
num_iterations = 1
while error > tolerance:
for c in range(num_clusters):
for a in range(env.num_actions):
old_q_values = np.copy(q_values[c, a])
q_values[c, a] = rewards[c, a] + env.gamma * np.matmul(
transition_probs[c, a, :], np.max(q_values, axis=1))
error = np.max(abs(q_values[c, a] - old_q_values))
if num_iterations % 1000 == 0 and verbose:
logging.info('Iteration %d: %f', num_iterations, error)
num_iterations += 1
return q_values
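# The loop above is the Bellman optimality update on the aggregate MDP,
#
#   Q(c, a) <- R(c, a) + gamma * sum_{c'} P(c, a)(c') * max_{a'} Q(c', a'),
#
# repeated until the change drops below `tolerance`. Note that, as written,
# the stopping test uses the change of the last (c, a) pair updated in a sweep
# rather than the maximum change over the whole sweep.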
@gin.configurable
def experiment(base_dir,
env,
metrics,
max_iterations=100,
run=0,
random_mdp=False,
verbose=False,
aggregation_method='greedy'):
"""Module to run the experiment.
Args:
base_dir: str, base directory where to save the files.
env: an environment specifying the true underlying MDP.
metrics: list of metrics which will be used for the nearest-neighbour
approximants.
max_iterations: int, maximum number of iterations for each of the
aggregation methods.
run: int, run id.
random_mdp: bool, whether the environment is a random MDP or not.
verbose: bool, whether to print verbose messages.
aggregation_method: string, greedy or k_median method
Returns:
Dict containing statistics.
"""
if env.values is None:
logging.info('Values must have already been computed.')
return
cmap = cm.get_cmap('plasma', 256)
data = {
'Metric': [],
'num_states_target': [],
'run': [],
'qg': [],
'exact_qvalues': [],
'error': []
}
num_states_targets = np.linspace(1, env.num_states, 10).astype(int)
for num_states_target in num_states_targets:
# -(-x//1) is the same as ceil(x).
# num_states_target = max(int(-(-state_fraction * env.num_states // 1)), 1)
for metric in metrics:
if metric.metric is None:
continue
if verbose:
logging.info('***Run %d, %s, %d',
num_states_target, metric.name, run)
if aggregation_method == 'k_median':
aggregate_states, state_to_aggregate_states = (
k_medians(
metric.metric,
env.num_states,
num_states_target,
max_iterations,
verbose=verbose))
if aggregation_method == 'greedy':
aggregate_states, state_to_aggregate_states = (
greedy(
metric.metric,
env.num_states,
num_states_target,
max_iterations,
verbose=verbose))
if not random_mdp:
# Generate plot of neighborhoods.
neighbourhood_path = os.path.join(
base_dir, metric.name,
'neighborhood_{}_{}.pdf'.format(num_states_target, run))
obs_image = env.render_custom_observation(
env.reset(), state_to_aggregate_states, cmap,
boundary_values=[-1, num_states_target])
plt.imshow(obs_image)
with tf.gfile.GFile(neighbourhood_path, 'w') as f:
plt.savefig(f, format='pdf', dpi=300, bbox_inches='tight')
plt.clf()
# Perform value iteration on aggregate states.
q_aggregate = value_iteration(env, aggregate_states)
# Now project the values of the aggregate states to the ground states.
q_projected = [
q_aggregate[state_to_aggregate_states[s]]
for s in range(env.num_states)]
data['Metric'].append(metric.label)
data['num_states_target'].append(num_states_target)
data['run'].append(run)
data['qg'].append(q_projected)
data['exact_qvalues'].append(env.q_val_it_q_values)
data['error'].append(
np.mean(
np.max((np.abs(q_projected - env.q_val_it_q_values)), axis=1)))
return data
def plot_data(base_dir, data):
"""Plot the data collected from all experiment runs."""
del data['qg']
del data['exact_qvalues']
df = pd.DataFrame(data=data)
plt.subplots(1, 1, figsize=(8, 6))
sns.lineplot(x='num_states_target', y='error', hue='Metric', data=df,
ci=99, lw=3)
plt.xlabel('Number of aggregate states', fontsize=24)
plt.ylabel('Avg. Error', fontsize=24)
plt.legend(fontsize=18)
pdf_file = os.path.join(base_dir, 'aggregate_value_iteration.pdf')
with tf.io.gfile.GFile(pdf_file, 'w') as f:
plt.savefig(f, format='pdf', dpi=300, bbox_inches='tight')
plt.clf()
plt.close('all')
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal.results import page_test_results
from telemetry.page import page as page_module
from telemetry.web_perf.metrics import rendering_stats
from telemetry.web_perf.metrics import smoothness
class _MockRenderingStats(object):
stats = ['refresh_period', 'frame_timestamps', 'frame_times', 'paint_times',
'painted_pixel_counts', 'record_times',
'recorded_pixel_counts', 'approximated_pixel_percentages',
'checkerboarded_pixel_percentages', 'input_event_latency',
'frame_queueing_durations', 'main_thread_scroll_latency',
'gesture_scroll_update_latency']
def __init__(self, **kwargs):
self.input_event_latency = None # to avoid pylint no-member error
self.errors = {}
for stat in self.stats:
value = kwargs[stat] if stat in kwargs else None
setattr(self, stat, value)
#pylint: disable=protected-access
class SmoothnessMetricUnitTest(unittest.TestCase):
def setUp(self):
self.metric = smoothness.SmoothnessMetric()
self.page = page_module.Page('file://blank.html')
self.good_timestamps = [[10, 20], [30, 40, 50]]
self.not_enough_frames_timestamps = [[10], [20, 30, 40]]
def testPopulateResultsFromStats(self):
stats = _MockRenderingStats()
for stat in _MockRenderingStats.stats:
# Just set fake data for all of the relevant arrays of stats typically
# found in a RenderingStats object.
setattr(stats, stat, [[10, 20], [30, 40, 50]])
results = page_test_results.PageTestResults()
results.WillRunPage(self.page)
self.metric._PopulateResultsFromStats(results, stats, False)
current_page_run = results.current_page_run
self.assertTrue(current_page_run.ok)
expected_values_count = 12
self.assertEquals(expected_values_count, len(current_page_run.values))
def testHasEnoughFrames(self):
# This list will pass since every sub-array has at least 2 frames.
has_enough_frames = self.metric._HasEnoughFrames(self.good_timestamps)
self.assertTrue(has_enough_frames)
def testHasEnoughFramesWithNotEnoughFrames(self):
# This list will fail since the first sub-array only has a single frame.
has_enough_frames = self.metric._HasEnoughFrames(
self.not_enough_frames_timestamps)
self.assertFalse(has_enough_frames)
def testComputeSurfaceFlingerMetricNoJank(self):
stats = _MockRenderingStats(refresh_period=10,
frame_timestamps=[[10, 20], [130, 140, 150]],
frame_times=[[10], [10, 10]])
avg_surface_fps, jank_count, max_frame_delay, frame_lengths = (
self.metric._ComputeSurfaceFlingerMetric(self.page, stats))
self.assertEquals([1, 1, 1], frame_lengths.values)
self.assertEquals(1, max_frame_delay.value)
self.assertEquals(0, jank_count.value)
self.assertEquals(100, avg_surface_fps.value)
def testComputeSurfaceFlingerMetricJank(self):
stats = _MockRenderingStats(
refresh_period=10,
frame_timestamps=[[10, 20, 50], [130, 140, 150, 170, 180]],
frame_times=[[10, 30], [10, 10, 20, 10]])
avg_surface_fps, jank_count, max_frame_delay, frame_lengths = (
self.metric._ComputeSurfaceFlingerMetric(self.page, stats))
self.assertEquals([1, 3, 1, 1, 2, 1], frame_lengths.values)
self.assertEquals(3, max_frame_delay.value)
self.assertEquals(2, jank_count.value)
self.assertEquals(67, avg_surface_fps.value)
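    # The expectations above are consistent with frame lengths measured in
    # refresh (vsync) periods, frame_times / refresh_period = [1, 3, 1, 1, 2, 1];
    # a jank counted whenever a frame takes more vsync periods than the previous
    # one (two occurrences here); and an average surface FPS of
    # 6 frames / 90 ms ~= 67.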
def testComputeFrameTimeMetricWithNotEnoughFrames(self):
stats = _MockRenderingStats(
refresh_period=10,
frame_timestamps=self.not_enough_frames_timestamps,
frame_times=[[10, 20], [30, 40, 50]])
avg_surface_fps, jank_count, max_frame_delay, frame_lengths = (
self.metric._ComputeSurfaceFlingerMetric(self.page, stats))
self.assertEquals(None, avg_surface_fps.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
avg_surface_fps.none_value_reason)
self.assertEquals(None, jank_count.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
jank_count.none_value_reason)
self.assertEquals(None, max_frame_delay.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
max_frame_delay.none_value_reason)
self.assertEquals(None, frame_lengths.values)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
frame_lengths.none_value_reason)
def testComputeLatencyMetric(self):
stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
input_event_latency=[[10, 20], [30, 40, 50]])
# pylint: disable=unbalanced-tuple-unpacking
mean_value, discrepancy_value = self.metric._ComputeLatencyMetric(
self.page, stats, 'input_event_latency', stats.input_event_latency)
self.assertEquals(30, mean_value.value)
self.assertEquals(60, discrepancy_value.value)
def testComputeLatencyMetricWithMissingData(self):
stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
input_event_latency=[[], []])
value = self.metric._ComputeLatencyMetric(
self.page, stats, 'input_event_latency', stats.input_event_latency)
self.assertEquals((), value)
def testComputeLatencyMetricWithNotEnoughFrames(self):
stats = _MockRenderingStats(
frame_timestamps=self.not_enough_frames_timestamps,
input_event_latency=[[], []])
# pylint: disable=unbalanced-tuple-unpacking
mean_value, discrepancy_value = self.metric._ComputeLatencyMetric(
self.page, stats, 'input_event_latency', stats.input_event_latency)
self.assertEquals(None, mean_value.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
mean_value.none_value_reason)
self.assertEquals(None, discrepancy_value.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
discrepancy_value.none_value_reason)
def testComputeGestureScrollUpdateLatencies(self):
stats = _MockRenderingStats(
frame_timestamps=self.good_timestamps,
gesture_scroll_update_latency=[[10, 20], [30, 40, 50]])
gesture_value = self.metric._ComputeFirstGestureScrollUpdateLatencies(
self.page, stats)
self.assertEquals([10, 30], gesture_value.values)
def testComputeGestureScrollUpdateLatenciesWithMissingData(self):
stats = _MockRenderingStats(
frame_timestamps=self.good_timestamps,
gesture_scroll_update_latency=[[], []])
value = self.metric._ComputeFirstGestureScrollUpdateLatencies(
self.page, stats)
self.assertEquals(None, value.values)
def testComputeGestureScrollUpdateLatenciesWithNotEnoughFrames(self):
stats = _MockRenderingStats(
frame_timestamps=self.not_enough_frames_timestamps,
gesture_scroll_update_latency=[[10, 20], [30, 40, 50]])
gesture_value = self.metric._ComputeFirstGestureScrollUpdateLatencies(
self.page, stats)
self.assertEquals(None, gesture_value.values)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
gesture_value.none_value_reason)
def testComputeQueueingDuration(self):
stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
frame_queueing_durations=[[10, 20], [30, 40]])
list_of_scalar_values = self.metric._ComputeQueueingDuration(self.page,
stats)
self.assertEquals([10, 20, 30, 40], list_of_scalar_values.values)
def testComputeQueueingDurationWithMissingData(self):
stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
frame_queueing_durations=[[], []])
list_of_scalar_values = self.metric._ComputeQueueingDuration(
self.page, stats)
self.assertEquals(None, list_of_scalar_values.values)
self.assertEquals('No frame queueing durations recorded.',
list_of_scalar_values.none_value_reason)
def testComputeQueueingDurationWithMissingDataAndErrorValue(self):
stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
frame_queueing_durations=[[], []])
stats.errors['frame_queueing_durations'] = (
'Current chrome version does not support the queueing delay metric.')
list_of_scalar_values = self.metric._ComputeQueueingDuration(
self.page, stats)
self.assertEquals(None, list_of_scalar_values.values)
self.assertEquals(
'Current chrome version does not support the queueing delay metric.',
list_of_scalar_values.none_value_reason)
def testComputeQueueingDurationWithNotEnoughFrames(self):
stats = _MockRenderingStats(
frame_timestamps=self.not_enough_frames_timestamps,
frame_queueing_durations=[[10, 20], [30, 40, 50]])
list_of_scalar_values = self.metric._ComputeQueueingDuration(self.page,
stats)
self.assertEquals(None, list_of_scalar_values.values)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
list_of_scalar_values.none_value_reason)
def testComputeFrameTimeMetric(self):
stats = _MockRenderingStats(frame_timestamps=self.good_timestamps,
frame_times=[[10, 20], [30, 40, 50]])
frame_times_value, mean_frame_time_value, percentage_smooth_value = (
self.metric._ComputeFrameTimeMetric(self.page, stats))
self.assertEquals([10, 20, 30, 40, 50], frame_times_value.values)
self.assertEquals(30, mean_frame_time_value.value)
self.assertEquals(20, percentage_smooth_value.value)
def testComputeFrameTimeMetricWithNotEnoughFrames2(self):
stats = _MockRenderingStats(
frame_timestamps=self.not_enough_frames_timestamps,
frame_times=[[10, 20], [30, 40, 50]])
frame_times_value, mean_frame_time_value, percentage_smooth_value = (
self.metric._ComputeFrameTimeMetric(self.page, stats))
self.assertEquals(None, frame_times_value.values)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
frame_times_value.none_value_reason)
self.assertEquals(None, mean_frame_time_value.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
mean_frame_time_value.none_value_reason)
self.assertEquals(None, percentage_smooth_value.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
percentage_smooth_value.none_value_reason)
def testComputeFrameTimeDiscrepancy(self):
stats = _MockRenderingStats(frame_timestamps=self.good_timestamps)
frame_time_discrepancy_value = self.metric._ComputeFrameTimeDiscrepancy(
self.page, stats)
self.assertEquals(10, frame_time_discrepancy_value.value)
def testComputeFrameTimeDiscrepancyWithNotEnoughFrames(self):
stats = _MockRenderingStats(
frame_timestamps=self.not_enough_frames_timestamps)
frame_time_discrepancy_value = self.metric._ComputeFrameTimeDiscrepancy(
self.page, stats)
self.assertEquals(None, frame_time_discrepancy_value.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
frame_time_discrepancy_value.none_value_reason)
def testComputeMeanPixelsApproximated(self):
stats = _MockRenderingStats(
frame_timestamps=self.good_timestamps,
approximated_pixel_percentages=[[10, 20], [30, 40, 50]])
mean_pixels_value = self.metric._ComputeMeanPixelsApproximated(
self.page, stats)
self.assertEquals(30, mean_pixels_value.value)
def testComputeMeanPixelsApproximatedWithNotEnoughFrames(self):
stats = _MockRenderingStats(
frame_timestamps=self.not_enough_frames_timestamps,
approximated_pixel_percentages=[[10, 20], [30, 40, 50]])
mean_pixels_value = self.metric._ComputeMeanPixelsApproximated(
self.page, stats)
self.assertEquals(None, mean_pixels_value.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
mean_pixels_value.none_value_reason)
def testComputeMeanPixelsCheckerboarded(self):
stats = _MockRenderingStats(
frame_timestamps=self.good_timestamps,
checkerboarded_pixel_percentages=[[10, 20], [30, 40, 50]])
mean_pixels_value = self.metric._ComputeMeanPixelsCheckerboarded(
self.page, stats)
self.assertEquals(30, mean_pixels_value.value)
def testComputeMeanPixelsCheckerboardedWithNotEnoughFrames(self):
stats = _MockRenderingStats(
frame_timestamps=self.not_enough_frames_timestamps,
checkerboarded_pixel_percentages=[[10, 20], [30, 40, 50]])
mean_pixels_value = self.metric._ComputeMeanPixelsCheckerboarded(
self.page, stats)
self.assertEquals(None, mean_pixels_value.value)
self.assertEquals(smoothness.NOT_ENOUGH_FRAMES_MESSAGE,
mean_pixels_value.none_value_reason)
def testComputeMeanPixelsCheckerboardedWithNoData(self):
stats = _MockRenderingStats(
frame_timestamps=self.good_timestamps,
checkerboarded_pixel_percentages=None)
stats.errors[rendering_stats.CHECKERBOARDED_PIXEL_ERROR] = 'test error'
mean_pixels_value = self.metric._ComputeMeanPixelsCheckerboarded(
self.page, stats)
self.assertEquals(None, mean_pixels_value.value)
self.assertEquals('test error',
mean_pixels_value.none_value_reason)
|
|
#!/usr/bin/python3
import subprocess, os, json
import imagemgr
import network
from log import logger
import env
from lvmtool import sys_run, check_volume
from monitor import Container_Collector, History_Manager
import lxc
class Container(object):
def __init__(self, addr, etcdclient):
self.addr = addr
self.etcd=etcdclient
self.libpath = env.getenv('DOCKLET_LIB')
self.confpath = env.getenv('DOCKLET_CONF')
self.fspath = env.getenv('FS_PREFIX')
# set jupyter running dir in container
self.rundir = "/home/jupyter"
# set root running dir in container
self.nodehome = "/root"
self.lxcpath = "/var/lib/lxc"
self.imgmgr = imagemgr.ImageMgr()
self.historymgr = History_Manager()
def create_container(self, lxc_name, proxy_server_ip, username, uid, setting, clustername, clusterid, containerid, hostname, ip, gateway, image):
logger.info("create container %s of %s for %s" %(lxc_name, clustername, username))
try:
setting = json.loads(setting)
cpu = int(setting['cpu']) * 100000
memory = setting["memory"]
disk = setting["disk"]
image = json.loads(image)
status = self.imgmgr.prepareFS(username,image,lxc_name,disk)
if not status:
return [False, "Create container failed when preparing filesystem, possibly insufficient space"]
#Ret = subprocess.run([self.libpath+"/lxc_control.sh",
# "create", lxc_name, username, str(clusterid), hostname,
# ip, gateway, str(cpu), str(memory)], stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT,shell=False, check=True)
rootfs = "/var/lib/lxc/%s/rootfs" % lxc_name
if not os.path.isdir("%s/global/users/%s" % (self.fspath,username)):
path = env.getenv('DOCKLET_LIB')
subprocess.call([path+"/userinit.sh", username])
logger.info("user %s directory not found, create it" % username)
sys_run("mkdir -p /var/lib/lxc/%s" % lxc_name)
logger.info("generate config file for %s" % lxc_name)
def config_prepare(content):
content = content.replace("%ROOTFS%",rootfs)
content = content.replace("%HOSTNAME%",hostname)
content = content.replace("%IP%",ip)
content = content.replace("%GATEWAY%",gateway)
content = content.replace("%CONTAINER_MEMORY%",str(memory))
content = content.replace("%CONTAINER_CPU%",str(cpu))
content = content.replace("%FS_PREFIX%",self.fspath)
content = content.replace("%USERNAME%",username)
content = content.replace("%CLUSTERID%",str(clusterid))
content = content.replace("%LXCSCRIPT%",env.getenv("LXC_SCRIPT"))
content = content.replace("%LXCNAME%",lxc_name)
content = content.replace("%UserID%",str(uid))
content = content.replace("%CLUSTERNAME%", clustername)
content = content.replace("%VETHPAIR%", str(clusterid)+'-'+str(containerid))
return content
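            # Illustrative example (the container.conf template shipped with
            # docklet may differ): a template line such as
            #     lxc.rootfs = %ROOTFS%
            # would be rewritten by config_prepare() to
            #     lxc.rootfs = /var/lib/lxc/<lxc_name>/rootfs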
conffile = open(self.confpath+"/container.conf", 'r')
conftext = conffile.read()
conffile.close()
conftext = config_prepare(conftext)
conffile = open("/var/lib/lxc/%s/config" % lxc_name,"w")
conffile.write(conftext)
conffile.close()
if os.path.isfile(self.confpath+"/lxc.custom.conf"):
conffile = open(self.confpath+"/lxc.custom.conf", 'r')
conftext = conffile.read()
conffile.close()
conftext = config_prepare(conftext)
conffile = open("/var/lib/lxc/%s/config" % lxc_name, 'a')
conffile.write(conftext)
conffile.close()
#logger.debug(Ret.stdout.decode('utf-8'))
logger.info("create container %s success" % lxc_name)
# get AUTH COOKIE URL for jupyter
[status, authurl] = self.etcd.getkey("web/authurl")
if not status:
[status, masterip] = self.etcd.getkey("service/master")
if status:
webport = env.getenv("WEB_PORT")
authurl = "http://%s:%s/jupyter" % (masterip,
webport)
else:
logger.error ("get AUTH COOKIE URL failed for jupyter")
authurl = "error"
cookiename='docklet-jupyter-cookie'
rundir = self.lxcpath+'/'+lxc_name+'/rootfs' + self.rundir
logger.debug(rundir)
if not os.path.exists(rundir):
os.makedirs(rundir)
else:
if not os.path.isdir(rundir):
os.remove(rundir)
os.makedirs(rundir)
jconfigpath = rundir + '/jupyter.config'
config = open(jconfigpath, 'w')
jconfigs="""USER=%s
PORT=%d
COOKIE_NAME=%s
BASE_URL=%s
HUB_PREFIX=%s
HUB_API_URL=%s
IP=%s
""" % (username, 10000, cookiename, '/'+ proxy_server_ip +'/go/'+username+'/'+clustername, '/jupyter',
authurl, ip.split('/')[0])
config.write(jconfigs)
config.close()
except subprocess.CalledProcessError as sube:
logger.error('create container %s failed: %s' % (lxc_name,
sube.stdout.decode('utf-8')))
return [False, "create container failed"]
except Exception as e:
logger.error(e)
return [False, "create container failed"]
self.historymgr.log(lxc_name,"Create")
return [True, "create container success"]
def delete_container(self, lxc_name):
logger.info ("delete container:%s" % lxc_name)
if self.imgmgr.deleteFS(lxc_name):
Container_Collector.billing_increment(lxc_name)
self.historymgr.log(lxc_name,"Delete")
logger.info("delete container %s success" % lxc_name)
return [True, "delete container success"]
else:
logger.info("delete container %s failed" % lxc_name)
return [False, "delete container failed"]
#status = subprocess.call([self.libpath+"/lxc_control.sh", "delete", lxc_name])
#if int(status) == 1:
# logger.error("delete container %s failed" % lxc_name)
# return [False, "delete container failed"]
#else:
# logger.info ("delete container %s success" % lxc_name)
# return [True, "delete container success"]
# start container, if running, restart it
def start_container(self, lxc_name):
logger.info ("start container:%s" % lxc_name)
c = lxc.Container(lxc_name)
if not c.start():
logger.error('start container %s failed' % lxc_name)
return [False, "start container failed"]
else:
logger.info ("start container %s success" % lxc_name)
self.historymgr.log(lxc_name,"Start")
return [True, "start container success"]
# start container services
# for the master node, jupyter must be started,
    # for other nodes, ssh must be started.
# container must be RUNNING before calling this service
def start_services(self, lxc_name, services=[]):
logger.info ("start services for container %s: %s" % (lxc_name, services))
c = lxc.Container(lxc_name)
Ret = c.attach_wait(lxc.attach_run_command,["service","ssh","start"])
if Ret == 0:
if len(services) == 0: # master node
Ret = c.attach_wait(lxc.attach_run_command,["su","-c","%s/start_jupyter.sh" % self.rundir])
if Ret == 0:
logger.info("start ssh and jupyter notebook services for container %s success" % lxc_name)
return [True, "start container services success"]
else:
logger.info("start ssh service for container %s success" % lxc_name)
return [True, "start container services success"]
logger.error('start services for container %s failed' % lxc_name)
return [False, "start services for container failed"]
# mount_container: mount base image and user image by aufs
def mount_container(self,lxc_name):
logger.info ("mount container:%s" % lxc_name)
[success, status] = self.container_status(lxc_name)
if not success:
return [False, status]
self.imgmgr.checkFS(lxc_name)
return [True, "mount success"]
# recover container: if running, do nothing. if stopped, start it
def recover_container(self, lxc_name):
logger.info ("recover container:%s" % lxc_name)
#status = subprocess.call([self.libpath+"/lxc_control.sh", "status", lxc_name])
[success, status] = self.container_status(lxc_name)
if not success:
return [False, status]
self.imgmgr.checkFS(lxc_name)
if status == 'stopped':
logger.info("%s stopped, recover it to running" % lxc_name)
if self.start_container(lxc_name)[0]:
self.historymgr.log(lxc_name,"Recover")
if self.start_services(lxc_name)[0]:
logger.info("%s recover success" % lxc_name)
return [True, "recover success"]
else:
logger.error("%s recover failed with services not start" % lxc_name)
return [False, "recover failed for services not start"]
else:
logger.error("%s recover failed for container starting failed" % lxc_name)
return [False, "recover failed for container starting failed"]
else:
logger.info("%s recover success" % lxc_name)
return [True, "recover success"]
def update_baseurl(self, lxc_name, old_ip, new_ip):
rundir = self.lxcpath+'/'+lxc_name+'/rootfs' + self.rundir
if not os.path.exists(rundir):
return [False, "container %s doesn't exist"%(lxc_name)]
jconfigpath = rundir + '/jupyter.config'
config = open(jconfigpath, 'r')
context = config.read()
config.close()
context = context.replace(old_ip+"/go", new_ip+"/go")
config = open(jconfigpath, 'w')
config.write(context)
config.close()
return [True,"success"]
def stop_container(self, lxc_name):
logger.info ("stop container:%s" % lxc_name)
[success, status] = self.container_status(lxc_name)
if not success:
return [False, status]
if status == "running":
c = lxc.Container(lxc_name)
if not c.stop():
logger.error("stop container %s failed" % lxc_name)
return [False, "stop container failed"]
else:
self.historymgr.log(lxc_name,"Stop")
logger.info("stop container %s success" % lxc_name)
return [True, "stop container success"]
else:
logger.info("container %s already stopped" % lxc_name)
return [True, "stop container success"]
def detach_container(self, lxc_name):
logger.info("detach container:%s" % lxc_name)
[success, status] = self.container_status(lxc_name)
if not success:
return [False, status]
if status == 'running':
logger.error("container %s is running, please stop it first" % lxc_name)
self.imgmgr.detachFS(lxc_name)
return [True, "detach container success"]
# check container: check LV and mountpoints, if wrong, try to repair it
def check_container(self, lxc_name):
logger.info ("check container:%s" % lxc_name)
if not check_volume("docklet-group", lxc_name):
logger.error("check container %s failed" % lxc_name)
return [False, "check container failed"]
#status = subprocess.call([self.libpath+"/lxc_control.sh", "check", lxc_name])
self.imgmgr.checkFS(lxc_name)
logger.info ("check container %s success" % lxc_name)
return [True, "check container success"]
def is_container(self, lxc_name):
if lxc.Container(lxc_name).defined:
return True
else:
return False
def container_status(self, lxc_name):
if not self.is_container(lxc_name):
return [False, "container not found"]
c = lxc.Container(lxc_name)
if c.running:
return [True, 'running']
else:
return [True, 'stopped']
def list_containers(self):
lxclist = []
for c in lxc.list_containers(as_object=True):
lxclist.append(c.name)
return [True, lxclist]
def delete_allcontainers(self):
logger.info ("deleting all containers...")
[status, containers] = self.list_containers()
result = True
for container in containers:
            [success, status] = self.container_status(container)
            if status == 'running':
self.stop_container(container)
result = result & self.delete_container(container)[0]
if result:
logger.info ("deleted all containers success")
return [True, 'all deleted']
else:
logger.error ("deleted all containers failed")
return [False, 'some containers delete failed']
# list containers in /var/lib/lxc/ as local
# list containers in FS_PREFIX/global/... on this host as global
def diff_containers(self):
[status, localcontainers] = self.list_containers()
globalpath = self.fspath+"/global/users/"
users = os.listdir(globalpath)
globalcontainers = []
for user in users:
clusters = os.listdir(globalpath+user+"/clusters")
for cluster in clusters:
clusterfile = open(globalpath+user+"/clusters/"+cluster, 'r')
clusterinfo = json.loads(clusterfile.read())
for container in clusterinfo['containers']:
if container['host'] == self.addr:
globalcontainers.append(container['containername'])
both = []
onlylocal = []
onlyglobal = []
for container in localcontainers:
if container in globalcontainers:
both.append(container)
else:
onlylocal.append(container)
for container in globalcontainers:
if container not in localcontainers:
onlyglobal.append(container)
return [both, onlylocal, onlyglobal]
    def create_image(self,username,imagename,containername,description="nothing",imagenum=10):
return self.imgmgr.createImage(username,imagename,containername,description,imagenum)
def update_basefs(self,imagename):
return self.imgmgr.update_basefs(imagename)
# check all local containers
def check_allcontainers(self):
[both, onlylocal, onlyglobal] = self.diff_containers()
logger.info("check all containers and repair them")
status = True
result = True
for container in both:
logger.info ("%s in LOCAL and GLOBAL checks..." % container)
            [status, msg] = self.check_container(container)
result = result & status
if len(onlylocal) > 0:
result = False
logger.error ("some container only exists in LOCAL: %s" % onlylocal)
if len(onlyglobal) > 0:
result = False
logger.error ("some container only exists in GLOBAL: %s" % onlyglobal)
        if result:
logger.info ("check all containers success")
return [True, 'all is ok']
else:
logger.error ("check all containers failed")
return [False, 'not ok']
|
|
import numpy as np
from scipy.interpolate import pchip, Akima1DInterpolator
from scipy.linalg import norm
from openmdao.main.api import Component, Assembly
from openmdao.lib.datatypes.api import Instance, Array, VarTree, Enum, Int, List, Str, Float, Bool
from fusedwind.lib.distfunc import distfunc
from fusedwind.lib.cubicspline import NaturalCubicSpline
from fusedwind.lib.geom_tools import RotMat, dotXC, calculate_length, curvature
from fusedwind.lib.bezier import BezierCurve
from fusedwind.turbine.geometry_vt import Curve, BladePlanformVT, BladeSurfaceVT, BlendAirfoilShapes, AirfoilShape
from fusedwind.interface import base, implement_base
class SplineBase(object):
"""
base for 1-D splines
if the spline requires it, implement a fitting procedure in __init__
place the main call to the spline in __call__
"""
def initialize(self, Cx, xp, yp):
pass
def normdist(self, xp):
"""normalize x distribution"""
return (xp - xp[0]) / (xp[-1] - xp[0])
def __call__(self, x, Cx, C):
"""
params:
----------
x: array
array with new x-distribution
xp: array
array with x-coordinates of spline control points
yp: array
array with y-coordinates of spline control points
returns
---------
ynew: array
resampled points
"""
raise NotImplementedError('A derived class of SplineBase needs to implement a __call__ method')
class pchipSpline(SplineBase):
def initialize(self, x, xp, yp):
"""
params:
----------
x: array
array with new x-distribution
xp: array
array with original x-distribution
yp: array
array with original y-distribution
returns
---------
ynew: array
resampled points
"""
return self.__call__(x, xp, yp)
def __call__(self, x, Cx, C):
"""
params:
----------
x: array
array with new x-distribution
xp: array
array with x-coordinates of spline control points
yp: array
array with y-coordinates of spline control points
returns
---------
ynew: array
resampled points
"""
spl = pchip(Cx, C)
return spl(x)
class BezierSpline(SplineBase):
def initialize(self, x, xp, yp):
"""
params:
----------
x: array
array with new x-distribution
xp: array
array with original x-distribution
yp: array
array with original y-distribution
returns
---------
ynew: array
resampled points
"""
self.B = BezierCurve()
self.B.CPs = np.array([xp, yp]).T
return self.__call__(x, xp, yp)
def __call__(self, x, Cx, C):
"""
params:
----------
x: array
array with new x-distribution
xp: array
array with x-coordinates of spline control points
yp: array
array with y-coordinates of spline control points
returns
---------
ynew: array
resampled points
"""
self.B.CPs = np.array([Cx, C]).T
self.B.update()
spl = NaturalCubicSpline(self.B.points[:, 0], self.B.points[:, 1])
return spl(x)
spline_dict = {'pchip': pchipSpline,
'bezier': BezierSpline}
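# Minimal sketch of how the spline classes above are used (hypothetical control
# points, not taken from this module):
#
#   spl = spline_dict['pchip']()
#   x = np.linspace(0., 1., 50)              # new spanwise discretization
#   Cx = np.linspace(0., 1., 5)              # control point locations
#   C = np.array([0., 0.2, 0.5, 0.3, 0.])    # control point values
#   y = spl(x, Cx, C)                        # curve resampled onto x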
@base
class SplineComponentBase(Component):
"""
FUSED-Wind base class for splines
"""
spline_type = Enum('pchip', ('pchip', 'bezier', 'bspline','akima','cubic'),
iotype='in', desc='spline type used')
nC = Int(iotype='in')
Cx = Array(iotype='in', desc='Spanwise distribution of control points [0:1]')
x = Array(iotype='in', desc='Spanwise discretization')
xinit = Array(np.linspace(0,1,10), iotype='in', desc='Initial spanwise distribution')
Pinit = Array(np.zeros(10), iotype='in', desc='Initial curve as function of span')
P = Array(iotype='out', desc='Output curve')
def __init__(self, nC=8):
super(SplineComponentBase, self).__init__()
self.init_called = False
self.nC = nC
# the spline engine derived from SplineBase (set by parent class)
self.spline = None
self.add('C', Array(np.zeros(nC), size=(nC,),
dtype=float,
iotype='in',
desc='spline control points of cross-sectional curve fraction'
'ending point of region'))
self.set_spline(self.spline_type)
def set_spline(self, spline_type):
self.spline = spline_dict[spline_type]()
self.spline_type = spline_type
def initialize(self):
"""
"""
self.set_spline(self.spline_type)
self.C = self.spline(self.Cx, self.xinit, self.Pinit)
def execute(self):
"""
Default behaviour is to copy the input array
derived classes need to overwrite this class with specific splines
"""
if not self.init_called:
self.initialize()
self.P = self.spline(self.x, self.Cx, self.C)
@base
class FFDSplineComponentBase(Component):
"""
FUSED-Wind base class for splines
"""
spline_type = Enum('pchip', ('pchip', 'bezier', 'bspline','akima','cubic'),
iotype='in', desc='spline type used')
base_spline_type = Enum('pchip', ('pchip', 'bezier', 'bspline','akima','cubic'),
iotype='in', desc='spline type used')
nC = Int(iotype='in')
Cx = Array(iotype='in', desc='Spanwise distribution of control points [0:1]')
x = Array(iotype='in', desc='Spanwise discretization')
xinit = Array(np.linspace(0,1,10), iotype='in', desc='Initial spanwise distribution')
Pinit = Array(np.zeros(10), iotype='in', desc='Initial curve as function of span')
P = Array(iotype='out', desc='Output curve')
dPds = Array(iotype='out', desc='Curvature')
def __init__(self, nC=8):
super(FFDSplineComponentBase, self).__init__()
self.init_called = False
self.nC = nC
# the spline engine derived from SplineBase (set by parent class)
self.spline = None
self.add('C', Array(np.zeros(nC), size=(nC,),
dtype=float,
iotype='in',
desc='spline control points of cross-sectional curve fraction'
'ending point of region'))
self.set_spline(self.spline_type)
def set_spline(self, spline_type):
self.spline = spline_dict[spline_type]()
self.spline_type = spline_type
def initialize(self):
"""
"""
self.base_spline = spline_dict[self.base_spline_type]()
self.set_spline(self.spline_type)
self.Pbase = self.base_spline(self.x, self.xinit, self.Pinit)
self.spline.initialize(self.x, self.Cx, self.C)
def execute(self):
"""
Default behaviour is to copy the input array
derived classes need to overwrite this class with specific splines
"""
if not self.init_called:
self.initialize()
self.P = self.Pbase + self.spline(self.x, self.Cx, self.C)
self.dPds = curvature(np.array([self.x, self.P]).T)
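# Note on FFDSplineComponentBase: the output P is the base curve Pbase (Pinit
# resampled onto x with the base spline) plus a spline-interpolated
# perturbation controlled by C, so leaving C at zero returns the resampled
# initial curve unchanged.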
@base
class ModifyBladePlanformBase(Component):
"""
Base for classes that modify a blade planform object
"""
pfIn = VarTree(BladePlanformVT(), iotype='in')
pfOut = VarTree(BladePlanformVT(), iotype='out')
@implement_base(ModifyBladePlanformBase)
class RedistributedBladePlanform(Component):
"""
Redistribute an existing planform onto a new distribution x
"""
x = Array(iotype='in', desc='New spanwise discretization')
pfIn = VarTree(BladePlanformVT(), iotype='in')
pfOut = VarTree(BladePlanformVT(), iotype='out')
def execute(self):
self.pfOut.blade_length = self.pfIn.blade_length
self.pfIn._compute_s()
for name in self.pfIn.list_vars():
var = getattr(self.pfIn, name)
if not isinstance(var, np.ndarray): continue
tck = pchip(self.pfIn.s, var)
newvar = tck(self.x)
setattr(self.pfOut, name, newvar)
self.pfOut._compute_s()
def redistribute_blade_planform(pfIn, x):
pfOut = BladePlanformVT()
pfOut.s = x.copy()
for name in pfIn.list_vars():
var = getattr(pfIn, name)
if not isinstance(var, np.ndarray): continue
tck = Akima1DInterpolator(pfIn.s, var)
newvar = tck(x)
setattr(pfOut, name, newvar)
return pfOut
def read_blade_planform(filename):
data = np.loadtxt(filename)
s = calculate_length(data[:, [0, 1, 2]])
pf = BladePlanformVT()
pf.blade_length = data[-1, 2]
pf.s = s / s[-1]
pf.smax = s[-1]
pf.x = data[:, 0] / data[-1, 2]
pf.y = data[:, 1] / data[-1, 2]
pf.z = data[:, 2] / data[-1, 2]
pf.rot_x = data[:, 3]
pf.rot_y = data[:, 4]
pf.rot_z = data[:, 5]
pf.chord = data[:, 6] / data[-1, 2]
pf.rthick = data[:, 7]
pf.rthick /= pf.rthick.max()
pf.athick = pf.rthick * pf.chord
pf.p_le = data[:, 8]
return pf
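# For reference, read_blade_planform() expects a whitespace-separated file with
# one row per spanwise station and nine columns, matching the header written by
# BladePlanformWriter below:
#   0: main_axis_x  1: main_axis_y  2: main_axis_z
#   3: rot_x        4: rot_y        5: rot_z
#   6: chord        7: rthick       8: p_le
# x, y and chord are normalised by the blade length (data[-1, 2]) and rthick by
# its maximum value.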
@base
class BladePlanformWriter(Component):
filebase = Str('blade')
pf = VarTree(BladePlanformVT(), iotype='in')
def execute(self):
name = self.filebase + self.itername + '.pfd'
try:
if '-fd' in self.itername or '-fd' in self.parent.itername:
name = self.filebase + '.pfd'
except:
pass
data = np.array([self.pf.x,
self.pf.y,
self.pf.z,
self.pf.rot_x,
self.pf.rot_y,
self.pf.rot_z,
self.pf.chord,
self.pf.rthick,
self.pf.p_le]).T
fid = open(name, 'w')
header = ['main_axis_x', 'main_axis_y', 'main_axis_z', 'rot_x', 'rot_y', 'rot_z', 'chord', 'rthick', 'p_le']
        exp_prec = 10 # exponential precision
col_width = exp_prec + 8 # column width required for exp precision
header_full = '# ' + ''.join([(hh + ' [%i]').center(col_width + 2)%i for i, hh in enumerate(header)])+'\n'
fid.write(header_full)
np.savetxt(fid, data, fmt='%'+' %i.%ie' % (col_width, exp_prec))
fid.close()
class ComputeDist(Component):
"""
simple redistribution function that clusters cells towards one end
"""
span_ni = Int(iotype='in')
x_dist = Array(iotype='in')
x = Array(iotype='out')
def execute(self):
if self.x_dist.shape[0] > 0:
self.x = self.x_dist
else:
self.x = distfunc([[0., -1, 1], [1., 0.2 * 1./self.span_ni, self.span_ni]])
class ScaleChord(Component):
"""
component to replace
connect(sname + '.P'+
'*blade_length/blade_length_ref', 'pfOut.' + vname)
"""
scaler = Float(iotype='in')
cIn = Array(iotype='in')
cOut = Array(iotype='out')
def execute(self):
self.cOut = self.scaler * self.cIn
class ComputeAthick(Component):
"""
component to replace connection:
connect('chord.P*rthick.P', 'pfOut.athick')
"""
chord = Array(iotype='in')
rthick = Array(iotype='in')
athick = Array(iotype='out')
def execute(self):
self.athick = self.chord * self.rthick
class ComputeSmax(Component):
x = Array(iotype='in')
y = Array(iotype='in')
z = Array(iotype='in')
    smax = Float(iotype='out')
def execute(self):
s = calculate_length(np.array([self.x,
self.y,
self.z]).T)
self.smax = s[-1]
@implement_base(ModifyBladePlanformBase)
class SplinedBladePlanform(Assembly):
nC = Int(8, iotype='in', desc='Number of spline control points along span')
Cx = Array(iotype='in', desc='spanwise distribution of spline control points')
blade_length = Float(1., iotype='in')
blade_length_ref = Float(iotype='in')
span_ni = Int(50, iotype='in')
pfIn = VarTree(BladePlanformVT(), iotype='in')
pfOut = VarTree(BladePlanformVT(), iotype='out')
def __init__(self):
super(SplinedBladePlanform, self).__init__()
self.blade_length_ref = 0.
self.add('compute_x', ComputeDist())
self.driver.workflow.add('compute_x')
self.connect('span_ni', 'compute_x.span_ni')
self.connect('compute_x.x', 'pfOut.s')
self.create_passthrough('compute_x.x_dist')
def _pre_execute(self):
super(SplinedBladePlanform, self)._pre_execute()
# set reference length first time this comp is executed
if self.blade_length_ref == 0.:
self.blade_length_ref = self.blade_length
def configure_splines(self, spline_type='pchip'):
if hasattr(self, 'chord_C'):
return
if self.Cx.shape[0] == 0:
self.Cx = np.linspace(0, 1, self.nC)
else:
self.nC = self.Cx.shape[0]
self.connect('blade_length', 'pfOut.blade_length')
for vname in self.pfIn.list_vars():
if vname in ['s', 'smax', 'athick', 'blade_length']:
continue
cIn = self.get('pfIn.' + vname)
cOut = self.get('pfOut.' + vname)
sname = vname.replace('.','_')
spl = self.add(sname, FFDSplineComponentBase(self.nC))
self.driver.workflow.add(sname)
# spl.log_level = logging.DEBUG
spl.set_spline(spline_type)
self.connect('compute_x.x', sname + '.x')
self.connect('Cx', sname + '.Cx')
spl.xinit = self.get('pfIn.s')
spl.Pinit = cIn
if vname == 'chord':
self.add('scaleC', ScaleChord())
self.driver.workflow.add('scaleC')
self.connect('chord.P', 'scaleC.cIn')
self.connect('blade_length/blade_length_ref', 'scaleC.scaler')
self.connect('scaleC.cOut', 'pfOut.chord')
# self.connect(sname + '.P'+
# '*blade_length/blade_length_ref', 'pfOut.' + vname)
else:
self.connect(sname + '.P', 'pfOut.' + vname)
self.create_passthrough(sname + '.C', alias=sname + '_C')
self.create_passthrough(sname + '.dPds', alias=sname + '_dPds')
self.add('athick', ComputeAthick())
self.driver.workflow.add('athick')
self.connect('chord.P', 'athick.chord')
self.connect('rthick.P', 'athick.rthick')
self.connect('athick.athick', 'pfOut.athick')
self.add('smax', ComputeSmax())
self.driver.workflow.add('smax')
self.connect('x.P', 'smax.x')
self.connect('y.P', 'smax.y')
self.connect('z.P', 'smax.z')
self.connect('smax.smax', 'pfOut.smax')
@base
class LoftedBladeSurfaceBase(Component):
surfout = VarTree(BladeSurfaceVT(), iotype='out')
surfnorot = VarTree(BladeSurfaceVT(), iotype='out')
@implement_base(LoftedBladeSurfaceBase)
class LoftedBladeSurface(Component):
pf = VarTree(BladePlanformVT(), iotype='in')
base_airfoils = List(iotype='in')
blend_var = Array(iotype='in')
chord_ni = Int(300, iotype='in')
span_ni = Int(300, iotype='in')
redistribute_flag = Bool(False, desc='redistribute points chordwise')
x_chordwise = Array(iotype='in', desc='user specified chordwise distribution')
interp_type = Enum('rthick', ('rthick', 's'), iotype='in')
surface_spline = Str('akima', iotype='in', desc='Spline')
    rot_order = Array(np.array([2,1,0]),iotype='in',desc='rotation order of airfoil sections '
'default z,y,x (twist,sweep,dihedral)')
surfout = VarTree(BladeSurfaceVT(), iotype='out')
surfnorot = VarTree(BladeSurfaceVT(), iotype='out')
def execute(self):
self.interpolator = BlendAirfoilShapes()
self.interpolator.ni = self.chord_ni
self.interpolator.spline = self.surface_spline
self.interpolator.blend_var = self.blend_var
self.interpolator.airfoil_list = self.base_airfoils
self.interpolator.initialize()
self.span_ni = self.pf.s.shape[0]
x = np.zeros((self.chord_ni, self.span_ni, 3))
for i in range(self.span_ni):
s = self.pf.s[i]
pos_x = self.pf.x[i]
pos_y = self.pf.y[i]
pos_z = self.pf.z[i]
chord = self.pf.chord[i]
p_le = self.pf.p_le[i]
# generate the blended airfoil shape
if self.interp_type == 'rthick':
rthick = self.pf.rthick[i]
points = self.interpolator(rthick)
else:
points = self.interpolator(s)
points = self.redistribute(points, pos_z)
points *= chord
points[:, 0] -= chord * p_le
# x-coordinate needs to be inverted for clockwise rotating blades
x[:, i, :] = (np.array([-points[:,0], points[:,1], x.shape[0] * [pos_z]]).T)
# save blade without sweep and prebend
x_norm = x.copy()
# add translation and rotation
x[:, :, 0] += self.pf.x
x[:, :, 1] += self.pf.y
x = self.rotate(x)
self.surfnorot.surface = x_norm
self.surfout.surface = x
def rotate(self, x):
"""
rotate blade sections accounting for twist and main axis orientation
the blade will be built with a "sheared" layout, ie no rotation around y
in the case of sweep.
if the main axis includes a winglet, the blade sections will be
rotated accordingly. ensure that an adequate point distribution is
used in this case to avoid sections colliding in the winglet junction!
"""
main_axis = Curve(points=np.array([self.pf.x, self.pf.y, self.pf.z]).T)
rot_normals = np.zeros((3,3))
x_rot = np.zeros(x.shape)
for i in range(x.shape[1]):
points = x[:, i, :]
rot_center = main_axis.points[i]
# rotation angles read from file
angles = np.array([self.pf.rot_x[i],
self.pf.rot_y[i],
self.pf.rot_z[i]]) * np.pi / 180.
# compute rotation angles of main_axis
t = main_axis.dp[i]
rot = np.zeros(3)
rot[0] = -np.arctan(t[1]/(t[2]+1.e-20))
v = np.array([t[2], t[1]])
vt = np.dot(v, v)**0.5
rot[1] = (np.arcsin(t[0]/vt))
angles[0] += rot[0]
angles[1] += rot[1]
# compute x-y-z normal vectors of rotation
n_y = np.cross(t, [1,0,0])
n_y = n_y/norm(n_y)
rot_normals[0, :] = np.array([1,0,0])
rot_normals[1, :] = n_y
rot_normals[2, :] = t
# compute final rotation matrix
rotation_matrix = np.matrix([[1.,0.,0.],[0.,1.,0.],[0.,0.,1.]])
for n, ii in enumerate(self.rot_order):
mat = np.matrix(RotMat(rot_normals[ii], angles[ii]))
rotation_matrix = mat * rotation_matrix
# apply rotation
x_rot[:, i, :] = dotXC(rotation_matrix, points, rot_center)
return x_rot
def redistribute(self, points, pos_z):
        if not self.redistribute_flag:
return points
airfoil = AirfoilShape(points=points)
try:
dist_LE = np.interp(pos_z, self.dist_LE[:, 0], self.dist_LE[:, 1])
except:
dist_LE = None
# pass airfoil to user defined routine to allow for additional configuration
airfoil = self.set_airfoil(airfoil, pos_z)
if self.x_chordwise.shape[0] > 0:
airfoil = airfoil.redistribute_chordwise(self.x_chordwise)
else:
airfoil = airfoil.redistribute(ni=self.chord_ni, dLE=dist_LE)
return airfoil.points
def set_airfoil(self, airfoil, pos_z):
if hasattr(self, 'gf_height'):
height = self.gf_height(pos_z)
length_factor = self.gf_length_factor(pos_z)
print 'gf', pos_z, height, length_factor
if height > 0.:
airfoil = airfoil.gurneyflap(height, length_factor)
return airfoil
def add_gurney_flap(self, s, gf_heights, gf_length_factor):
"""spline the gurney flap height and length factor curves"""
self.gf_height = pchip(s, gf_heights)
self.gf_length_factor = pchip(s, gf_length_factor)
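
# Illustrative sketch: a self-contained stand-in for what the RotMat and dotXC
# helpers used in rotate() above are assumed to do, namely build a rotation
# matrix about an arbitrary axis and apply it about a rotation center. It uses
# the Rodrigues formula with plain numpy, is for orientation only, and is not
# called by the class above. (numpy is already used above; the import is
# repeated here only to keep the sketch self-contained.)
import numpy as np


def _rotate_points_about_center(points, axis, angle, center):
    """Rotate an (n, 3) array of points by `angle` (radians) around the
    axis direction `axis` passing through the point `center`."""
    k = np.asarray(axis, dtype=float)
    k = k / np.linalg.norm(k)
    # skew-symmetric cross-product matrix of the unit axis
    K = np.array([[0.0, -k[2], k[1]],
                  [k[2], 0.0, -k[0]],
                  [-k[1], k[0], 0.0]])
    # Rodrigues: R = I + sin(a) K + (1 - cos(a)) K^2
    R = np.eye(3) + np.sin(angle) * K + (1.0 - np.cos(angle)) * np.dot(K, K)
    # p' = R (p - c) + c, written for row-vector points
    return np.dot(np.asarray(points, dtype=float) - center, R.T) + center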
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from warnings import warn
from .cqlhandling import CqlParsingRuleSet, Hint
from cql.cqltypes import (cql_types, lookup_casstype, CompositeType, UTF8Type,
ColumnToCollectionType, CounterColumnType)
from . import helptopics
simple_cql_types = set(cql_types)
simple_cql_types.difference_update(('set', 'map', 'list'))
cqldocs = helptopics.CQL3HelpTopics()
try:
import json
except ImportError:
import simplejson as json
class UnexpectedTableStructure(UserWarning):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return 'Unexpected table structure; may not translate correctly to CQL. ' + self.msg
SYSTEM_KEYSPACES = ('system', 'system_traces', 'system_auth')
NONALTERABLE_KEYSPACES = ('system', 'system_traces')
class Cql3ParsingRuleSet(CqlParsingRuleSet):
keywords = set((
'select', 'from', 'where', 'and', 'key', 'insert', 'update', 'with',
'limit', 'using', 'use', 'count', 'set',
'begin', 'apply', 'batch', 'truncate', 'delete', 'in', 'create',
'keyspace', 'schema', 'columnfamily', 'table', 'index', 'on', 'drop',
'primary', 'into', 'values', 'timestamp', 'ttl', 'alter', 'add', 'type',
'compact', 'storage', 'order', 'by', 'asc', 'desc', 'clustering',
'token', 'writetime', 'map', 'list', 'to'
))
unreserved_keywords = set((
'key', 'clustering', 'ttl', 'compact', 'storage', 'type', 'values'
))
columnfamily_options = (
# (CQL option name, Thrift option name (or None if same))
('comment', None),
('compaction_strategy_class', 'compaction_strategy'),
('comparator', 'comparator_type'),
('default_validation', 'default_validation_class'),
('gc_grace_seconds', None),
('index_interval', None),
('read_repair_chance', None),
('replicate_on_write', None),
('populate_io_cache_on_flush', None),
)
old_columnfamily_layout_options = (
# (CQL3 option name, schema_columnfamilies column name (or None if same))
('bloom_filter_fp_chance', None),
('caching', None),
('comment', None),
('compaction_strategy_class', None),
('dclocal_read_repair_chance', 'local_read_repair_chance'),
('gc_grace_seconds', None),
('index_interval', None),
('read_repair_chance', None),
('replicate_on_write', None),
('populate_io_cache_on_flush', None),
)
new_columnfamily_layout_options = (
('bloom_filter_fp_chance', None),
('caching', None),
('comment', None),
('dclocal_read_repair_chance', 'local_read_repair_chance'),
('gc_grace_seconds', None),
('index_interval', None),
('read_repair_chance', None),
('replicate_on_write', None),
('populate_io_cache_on_flush', None),
)
old_columnfamily_layout_map_options = (
# (CQL3 option prefix, schema_columnfamilies column name (or None if same),
# list of known suboptions)
('compaction_strategy_options', None,
('min_compaction_threshold', 'max_compaction_threshold')),
('compression_parameters', None,
('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
)
new_columnfamily_layout_map_options = (
# (CQL3 option name, schema_columnfamilies column name (or None if same),
# list of known map keys)
('compaction', 'compaction_strategy_options',
('class', 'min_threshold', 'max_threshold')),
('compression', 'compression_parameters',
('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
)
new_obsolete_cf_options = (
'compaction_parameters',
'compaction_strategy_class',
'compaction_strategy_options',
'compression_parameters',
'max_compaction_threshold',
'min_compaction_threshold',
)
@staticmethod
def token_dequote(tok):
if tok[0] == 'unclosedName':
# strip one quote
return tok[1][1:].replace('""', '"')
# cql2 version knows how to do everything else
return CqlParsingRuleSet.token_dequote(tok)
@classmethod
def cql3_dequote_value(cls, value):
return cls.cql2_dequote_value(value)
@staticmethod
def cql3_dequote_name(name):
name = name.strip()
if name == '':
return name
if name[0] == '"' and name[-1] == '"':
name = name[1:-1].replace('""', '"')
return name
@classmethod
def cql3_escape_value(cls, value):
return cls.cql2_escape_value(value)
@staticmethod
def cql3_escape_name(name):
return '"%s"' % name.replace('"', '""')
valid_cql3_word_re = re.compile(r'^[a-z][0-9a-z_]*$')
@classmethod
def is_valid_cql3_name(cls, s):
if s is None:
return False
if s.lower() in cls.keywords - cls.unreserved_keywords:
return False
return cls.valid_cql3_word_re.match(s) is not None
@classmethod
def cql3_maybe_escape_name(cls, name):
if cls.is_valid_cql3_name(name):
return name
return cls.cql3_escape_name(name)
@classmethod
def dequote_any(cls, t):
if t[0] == '"':
return cls.cql3_dequote_name(t)
return CqlParsingRuleSet.dequote_any(t)
dequote_value = cql3_dequote_value
dequote_name = cql3_dequote_name
escape_value = cql3_escape_value
escape_name = cql3_escape_name
maybe_escape_name = cql3_maybe_escape_name
CqlRuleSet = Cql3ParsingRuleSet()
# convenience for remainder of module
shorthands = ('completer_for', 'explain_completion',
'dequote_value', 'dequote_name',
'escape_value', 'escape_name',
'maybe_escape_name')
for shorthand in shorthands:
globals()[shorthand] = getattr(CqlRuleSet, shorthand)
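
# Illustrative self-check of the quoting helpers re-exported above; the
# expected values follow directly from cql3_escape_name / cql3_dequote_name /
# is_valid_cql3_name as defined in Cql3ParsingRuleSet. This is a sketch for
# orientation only and is never called by this module.
def _quoting_examples():
    assert escape_name('weird "name"') == '"weird ""name"""'
    assert dequote_name('"weird ""name"""') == 'weird "name"'
    assert maybe_escape_name('simple_name') == 'simple_name'  # valid identifier
    assert maybe_escape_name('select') == '"select"'          # reserved keyword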
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= /'([^']|'')*'/ ;
<quotedName> ::= /"([^"]|"")*"/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<wholenumber> ::= /[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<identifier> ::= /[a-z][a-z0-9_]*/ ;
<colon> ::= ":" ;
<star> ::= "*" ;
<endtoken> ::= ";" ;
<op> ::= /[-+=,().]/ ;
<cmp> ::= /[<>]=?/ ;
<brackets> ::= /[][{}]/ ;
<integer> ::= "-"? <wholenumber> ;
<boolean> ::= "true"
| "false"
;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedName> ::= /"([^"]|"")*/ ;
<unclosedComment> ::= /[/][*][^\n]*$/ ;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
| <boolean>
;
<tokenDefinition> ::= token="TOKEN" "(" <term> ( "," <term> )* ")"
| <stringLiteral>
;
<value> ::= <term>
| <collectionLiteral>
;
<cident> ::= <quotedName>
| <identifier>
| <unreservedKeyword>
;
<colname> ::= <cident> ; # just an alias
<collectionLiteral> ::= <listLiteral>
| <setLiteral>
| <mapLiteral>
;
<listLiteral> ::= "[" ( <term> ( "," <term> )* )? "]"
;
<setLiteral> ::= "{" ( <term> ( "," <term> )* )? "}"
;
<mapLiteral> ::= "{" <term> ":" <term> ( "," <term> ":" <term> )* "}"
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
| <authenticationStatement>
| <authorizationStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <alterTableStatement>
| <alterKeyspaceStatement>
;
<authenticationStatement> ::= <createUserStatement>
| <alterUserStatement>
| <dropUserStatement>
| <listUsersStatement>
;
<authorizationStatement> ::= <grantStatement>
| <revokeStatement>
| <listPermissionsStatement>
;
# timestamp is included here, since it's also a keyword
<simpleStorageType> ::= typename=( <identifier> | <stringLiteral> | <K_TIMESTAMP> ) ;
<storageType> ::= <simpleStorageType> | <collectionType> ;
<collectionType> ::= "map" "<" <simpleStorageType> "," <simpleStorageType> ">"
| "list" "<" <simpleStorageType> ">"
| "set" "<" <simpleStorageType> ">"
;
<columnFamilyName> ::= ( ksname=<cfOrKsName> dot="." )? cfname=<cfOrKsName> ;
<keyspaceName> ::= ksname=<cfOrKsName> ;
<nonSystemKeyspaceName> ::= ksname=<cfOrKsName> ;
<alterableKeyspaceName> ::= ksname=<cfOrKsName> ;
<cfOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<unreservedKeyword> ::= nocomplete=
( <K_KEY>
| <K_CLUSTERING>
# | <K_COUNT> -- to get count(*) completion, treat count as reserved
| <K_TTL>
| <K_COMPACT>
| <K_STORAGE>
| <K_TYPE>
| <K_VALUES> )
;
# <property> will be defined once cqlsh determines whether we're using
# 3.0.0-beta1 or later. :/
<newPropSpec> ::= [propname]=<cident> propeq="=" [propval]=<propertyValue>
;
<propertyValue> ::= propsimpleval=( <stringLiteral>
| <identifier>
| <integer>
| <float>
| <unreservedKeyword> )
# we don't use <mapLiteral> here so we can get more targeted
# completions:
| propsimpleval="{" [propmapkey]=<term> ":" [propmapval]=<term>
( ender="," [propmapkey]=<term> ":" [propmapval]=<term> )*
ender="}"
;
<oldPropSpec> ::= [propname]=<optionName> propeq="=" [optval]=<optionVal>
;
<optionName> ::= optname=<cident> ( optsep=":" subopt=( <cident> | <wholenumber> ) )?
;
<optionVal> ::= <identifier>
| <stringLiteral>
| <integer>
| <float>
;
'''
def use_pre_3_0_0_syntax():
# cassandra-1.1 support
CqlRuleSet.append_rules('''
<property> ::= <oldPropSpec> ;
''')
CqlRuleSet.columnfamily_layout_map_options = \
CqlRuleSet.old_columnfamily_layout_map_options
CqlRuleSet.columnfamily_layout_options = \
CqlRuleSet.old_columnfamily_layout_options
def use_post_3_0_0_syntax():
CqlRuleSet.append_rules('''
<property> ::= <newPropSpec> ;
''')
CqlRuleSet.columnfamily_layout_map_options = \
CqlRuleSet.new_columnfamily_layout_map_options
CqlRuleSet.columnfamily_layout_options = \
CqlRuleSet.new_columnfamily_layout_options
CqlRuleSet.obsolete_cf_options += CqlRuleSet.new_obsolete_cf_options
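
# Hypothetical helper (not from cqlsh) sketching how a caller might choose
# between the two grammar variants above once the server's CQL version is
# known; the (3, 0, 0) cutoff simply mirrors the function names.
def _select_property_syntax(cql_version_tuple):
    if cql_version_tuple >= (3, 0, 0):
        use_post_3_0_0_syntax()
    else:
        use_pre_3_0_0_syntax()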
def prop_equals_completer(ctxt, cass):
if not working_on_keyspace(ctxt):
# we know if the thing in the property name position is "compact" or
# "clustering" that there won't actually be an equals sign, because
# there are no properties by those names. there are, on the other hand,
# table properties that start with those keywords which don't have
# equals signs at all.
curprop = ctxt.get_binding('propname')[-1].upper()
if curprop in ('COMPACT', 'CLUSTERING'):
return ()
return ['=']
completer_for('oldPropSpec', 'propeq')(prop_equals_completer)
completer_for('newPropSpec', 'propeq')(prop_equals_completer)
@completer_for('newPropSpec', 'propname')
def new_prop_name_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_new_prop_name_completer(ctxt, cass)
else:
return cf_new_prop_name_completer(ctxt, cass)
@completer_for('propertyValue', 'propsimpleval')
def new_prop_val_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_new_prop_val_completer(ctxt, cass)
else:
return cf_new_prop_val_completer(ctxt, cass)
@completer_for('propertyValue', 'propmapkey')
def new_prop_val_mapkey_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_new_prop_val_mapkey_completer(ctxt, cass)
else:
return cf_new_prop_val_mapkey_completer(ctxt, cass)
@completer_for('propertyValue', 'propmapval')
def new_prop_val_mapval_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_new_prop_val_mapval_completer(ctxt, cass)
else:
return cf_new_prop_val_mapval_completer(ctxt, cass)
@completer_for('propertyValue', 'ender')
def new_prop_val_mapender_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_new_prop_val_mapender_completer(ctxt, cass)
else:
return cf_new_prop_val_mapender_completer(ctxt, cass)
def ks_new_prop_name_completer(ctxt, cass):
optsseen = ctxt.get_binding('propname', ())
if 'replication' not in optsseen:
return ['replication']
return ["durable_writes"]
def ks_new_prop_val_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname == 'durable_writes':
return ["'true'", "'false'"]
if optname == 'replication':
return ["{'class': '"]
return ()
def ks_new_prop_val_mapkey_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname != 'replication':
return ()
keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
for k, v in zip(keysseen, valsseen):
if k == 'class':
repclass = v
break
else:
return ["'class'"]
if repclass in CqlRuleSet.replication_factor_strategies:
    opts = set(('replication_factor',))
    return map(escape_value, opts.difference(keysseen))
if repclass == 'NetworkTopologyStrategy':
    return [Hint('<dc_name>')]
return ()
def ks_new_prop_val_mapval_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname != 'replication':
return ()
currentkey = dequote_value(ctxt.get_binding('propmapkey')[-1])
if currentkey == 'class':
return map(escape_value, CqlRuleSet.replication_strategies)
return [Hint('<value>')]
def ks_new_prop_val_mapender_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname != 'replication':
return [',']
keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
for k, v in zip(keysseen, valsseen):
if k == 'class':
repclass = v
break
else:
return [',']
if repclass in CqlRuleSet.replication_factor_strategies:
if 'replication_factor' not in keysseen:
return [',']
if repclass == 'NetworkTopologyStrategy' and len(keysseen) == 1:
return [',']
return ['}']
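# For orientation, the completion flow the ks_new_prop_val_* completers above
# produce while typing CREATE KEYSPACE ... WITH replication = { ... }:
#   after "{"                                  -> "'class'" is offered
#   after "'class':"                           -> the known replication strategy names
#   after a strategy taking replication_factor -> "'replication_factor'" is offered next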
def cf_new_prop_name_completer(ctxt, cass):
return [c[0] for c in (CqlRuleSet.columnfamily_layout_options +
CqlRuleSet.columnfamily_layout_map_options)]
def cf_new_prop_val_completer(ctxt, cass):
exist_opts = ctxt.get_binding('propname')
this_opt = exist_opts[-1]
if this_opt == 'compression':
return ["{'sstable_compression': '"]
if this_opt == 'compaction':
return ["{'class': '"]
if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
return ["'<obsolete_option>'"]
if this_opt in ('read_repair_chance', 'bloom_filter_fp_chance',
'dclocal_read_repair_chance'):
return [Hint('<float_between_0_and_1>')]
if this_opt in ('replicate_on_write', 'populate_io_cache_on_flush'):
return ["'yes'", "'no'"]
if this_opt in ('min_compaction_threshold', 'max_compaction_threshold',
'gc_grace_seconds', 'index_interval'):
return [Hint('<integer>')]
if this_opt == 'default_read_consistency':
return [cl for cl in CqlRuleSet.consistency_levels if cl != 'ANY']
if this_opt == 'default_write_consistency':
return CqlRuleSet.consistency_levels
return [Hint('<option_value>')]
def cf_new_prop_val_mapkey_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
for cql3option, _, subopts in CqlRuleSet.columnfamily_layout_map_options:
if optname == cql3option:
break
else:
return ()
keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
pairsseen = dict(zip(keysseen, valsseen))
if optname == 'compression':
return map(escape_value, set(subopts).difference(keysseen))
if optname == 'compaction':
opts = set(subopts)
try:
csc = pairsseen['class']
except KeyError:
return ["'class'"]
csc = csc.split('.')[-1]
if csc == 'SizeTieredCompactionStrategy':
opts.add('min_sstable_size')
elif csc == 'LeveledCompactionStrategy':
opts.add('sstable_size_in_mb')
return map(escape_value, opts)
return ()
def cf_new_prop_val_mapval_completer(ctxt, cass):
opt = ctxt.get_binding('propname')[-1]
key = dequote_value(ctxt.get_binding('propmapkey')[-1])
if opt == 'compaction':
if key == 'class':
return map(escape_value, CqlRuleSet.available_compaction_classes)
return [Hint('<option_value>')]
elif opt == 'compression':
if key == 'sstable_compression':
return map(escape_value, CqlRuleSet.available_compression_classes)
return [Hint('<option_value>')]
return ()
def cf_new_prop_val_mapender_completer(ctxt, cass):
return [',', '}']
@completer_for('optionName', 'optname')
def old_prop_name_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_old_prop_name_completer(ctxt, cass)
else:
return cf_old_prop_name_completer(ctxt, cass)
@completer_for('oldPropSpec', 'optval')
def old_prop_val_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_old_prop_val_completer(ctxt, cass)
else:
return cf_old_prop_val_completer(ctxt, cass)
@completer_for('optionName', 'optsep')
def old_prop_separator_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_old_prop_separator_completer(ctxt, cass)
else:
return cf_old_prop_separator_completer(ctxt, cass)
@completer_for('optionName', 'subopt')
def old_prop_suboption_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_old_prop_suboption_completer(ctxt, cass)
else:
return cf_old_prop_suboption_completer(ctxt, cass)
def ks_old_prop_name_completer(ctxt, cass):
exist_opts = ctxt.get_binding('optname', ())
try:
stratopt = exist_opts.index('strategy_class')
except ValueError:
return ['strategy_class =']
vals = ctxt.get_binding('optval')
stratclass = dequote_value(vals[stratopt])
if stratclass in CqlRuleSet.replication_factor_strategies:
return ['strategy_options:replication_factor =']
return [Hint('<strategy_option_name>')]
def ks_old_prop_val_completer(ctxt, cass):
exist_opts = ctxt.get_binding('optname', (None,))
if exist_opts[-1] == 'strategy_class':
return map(escape_value, CqlRuleSet.replication_strategies)
return [Hint('<option_value>')]
def ks_old_prop_separator_completer(ctxt, cass):
curopt = ctxt.get_binding('optname')[-1]
if curopt == 'strategy_options':
return [':']
return ()
def ks_old_prop_suboption_completer(ctxt, cass):
exist_opts = ctxt.get_binding('optname')
if exist_opts[-1] != 'strategy_options':
return ()
try:
stratopt = exist_opts.index('strategy_class')
except ValueError:
return ()
vals = ctxt.get_binding('optval')
stratclass = dequote_value(vals[stratopt])
if stratclass in CqlRuleSet.replication_factor_strategies:
return ['replication_factor =']
return [Hint('<dc_name>')]
def cf_old_prop_name_completer(ctxt, cass):
return list(CqlRuleSet.columnfamily_layout_options) + \
[c[0] + ':' for c in CqlRuleSet.columnfamily_layout_map_options]
def cf_old_prop_val_completer(ctxt, cass):
exist_opts = ctxt.get_binding('propname')
this_opt = exist_opts[-1]
if this_opt == 'compression_parameters:sstable_compression':
return map(escape_value, CqlRuleSet.available_compression_classes)
if this_opt == 'compaction_strategy_class':
return map(escape_value, CqlRuleSet.available_compaction_classes)
if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
return ["'<obsolete_option>'"]
if this_opt in ('comparator', 'default_validation'):
return simple_cql_types
if this_opt in ('read_repair_chance', 'bloom_filter_fp_chance'):
return [Hint('<float_between_0_and_1>')]
if this_opt in ('replicate_on_write', 'populate_io_cache_on_flush'):
return [Hint('<yes_or_no>')]
if this_opt in ('min_compaction_threshold', 'max_compaction_threshold', 'gc_grace_seconds'):
return [Hint('<integer>')]
return [Hint('<option_value>')]
def cf_old_prop_separator_completer(ctxt, cass):
opt = ctxt.get_binding('optname')
if any(opt == c[0] for c in CqlRuleSet.columnfamily_layout_map_options):
return [':']
return ()
def cf_old_prop_suboption_completer(ctxt, cass):
opt = ctxt.get_binding('optname')
if opt == 'compaction_strategy_options':
# try to determine the strategy class in use
prevopts = ctxt.get_binding('propname', ())
prevvals = ctxt.get_binding('optval', ())
for prevopt, prevval in zip(prevopts, prevvals):
if prevopt == 'compaction_strategy_class':
csc = dequote_value(prevval)
break
else:
layout = get_cf_layout(ctxt, cass)
try:
csc = layout.compaction_strategy
except Exception:
csc = ''
csc = csc.split('.')[-1]
if csc == 'SizeTieredCompactionStrategy':
return ['min_sstable_size']
elif csc == 'LeveledCompactionStrategy':
return ['sstable_size_in_mb']
for optname, _, subopts in CqlRuleSet.columnfamily_layout_map_options:
if opt == optname:
return subopts
return ()
@completer_for('tokenDefinition', 'token')
def token_word_completer(ctxt, cass):
return ['TOKEN(']
@completer_for('simpleStorageType', 'typename')
def storagetype_completer(ctxt, cass):
return simple_cql_types
@completer_for('keyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
return map(maybe_escape_name, cass.get_keyspace_names())
@completer_for('nonSystemKeyspaceName', 'ksname')
def nonsystem_ks_name_completer(ctxt, cass):
ksnames = [n for n in cass.get_keyspace_names() if n not in SYSTEM_KEYSPACES]
return map(maybe_escape_name, ksnames)
@completer_for('alterableKeyspaceName', 'ksname')
def alterable_ks_name_completer(ctxt, cass):
    ksnames = [n for n in cass.get_keyspace_names() if n not in NONALTERABLE_KEYSPACES]
return map(maybe_escape_name, ksnames)
@completer_for('columnFamilyName', 'ksname')
def cf_ks_name_completer(ctxt, cass):
return [maybe_escape_name(ks) + '.' for ks in cass.get_keyspace_names()]
@completer_for('columnFamilyName', 'dot')
def cf_ks_dot_completer(ctxt, cass):
name = dequote_name(ctxt.get_binding('ksname'))
if name in cass.get_keyspace_names():
return ['.']
return []
@completer_for('columnFamilyName', 'cfname')
def cf_name_completer(ctxt, cass):
ks = ctxt.get_binding('ksname', None)
if ks is not None:
ks = dequote_name(ks)
try:
cfnames = cass.get_columnfamily_names(ks)
except Exception:
if ks is None:
return ()
raise
return map(maybe_escape_name, cfnames)
@completer_for('unreservedKeyword', 'nocomplete')
def unreserved_keyword_completer(ctxt, cass):
# we never want to provide completions through this production;
# this is always just to allow use of some keywords as column
# names, CF names, property values, etc.
return ()
def get_cf_layout(ctxt, cass):
ks = ctxt.get_binding('ksname', None)
if ks is not None:
ks = dequote_name(ks)
cf = dequote_name(ctxt.get_binding('cfname'))
return cass.get_columnfamily_layout(ks, cf)
def working_on_keyspace(ctxt):
wat = ctxt.get_binding('wat').upper()
if wat in ('KEYSPACE', 'SCHEMA'):
return True
return False
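# Note: the 'wat' binding consulted above is captured by the
# wat=( "KEYSPACE" | "SCHEMA" ) and wat=( "COLUMNFAMILY" | "TABLE" ) terms in
# the CREATE statement rules further below; that capture is how the shared
# property completers distinguish keyspace properties from table properties.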
syntax_rules += r'''
<useStatement> ::= "USE" <keyspaceName>
;
<selectStatement> ::= "SELECT" <selectClause>
"FROM" cf=<columnFamilyName>
("WHERE" <whereClause>)?
("ORDER" "BY" <orderByClause> ( "," <orderByClause> )* )?
("LIMIT" limit=<wholenumber>)?
;
<whereClause> ::= <relation> ("AND" <relation>)*
;
<relation> ::= [rel_lhs]=<cident> ("=" | "<" | ">" | "<=" | ">=") <term>
| token="TOKEN" "(" [rel_tokname]=<cident>
( "," [rel_tokname]=<cident> )*
")" ("=" | "<" | ">" | "<=" | ">=") <tokenDefinition>
| [rel_lhs]=<cident> "IN" "(" <term> ( "," <term> )* ")"
;
<selectClause> ::= <selector> ("," <selector>)*
| "*"
| "COUNT" "(" star=( "*" | "1" ) ")"
;
<selector> ::= [colname]=<cident>
| "WRITETIME" "(" [colname]=<cident> ")"
| "TTL" "(" [colname]=<cident> ")"
;
<orderByClause> ::= [ordercol]=<cident> ( "ASC" | "DESC" )?
;
'''
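# For orientation, a statement covered by the SELECT rules above:
#   SELECT colA, WRITETIME(colB) FROM ks1.cf1
#       WHERE pk = 'x' AND ck >= 10 ORDER BY ck DESC LIMIT 50;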
@completer_for('orderByClause', 'ordercol')
def select_order_column_completer(ctxt, cass):
prev_order_cols = ctxt.get_binding('ordercol', ())
keyname = ctxt.get_binding('keyname')
if keyname is None:
keyname = ctxt.get_binding('rel_lhs', ())
if not keyname:
return [Hint("Can't ORDER BY here: need to specify partition key in WHERE clause")]
layout = get_cf_layout(ctxt, cass)
order_by_candidates = layout.column_aliases[:]
if len(order_by_candidates) > len(prev_order_cols):
return [maybe_escape_name(order_by_candidates[len(prev_order_cols)])]
return [Hint('No more orderable columns here.')]
@completer_for('relation', 'token')
def relation_token_word_completer(ctxt, cass):
return ['TOKEN(']
@completer_for('relation', 'rel_tokname')
def relation_token_subject_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
return [layout.partition_key_components[0]]
@completer_for('relation', 'rel_lhs')
def select_relation_lhs_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
filterable = set((layout.partition_key_components[0], layout.column_aliases[0]))
already_filtered_on = map(dequote_name, ctxt.get_binding('rel_lhs', ()))
for num in range(1, len(layout.partition_key_components)):
if layout.partition_key_components[num - 1] in already_filtered_on:
filterable.add(layout.partition_key_components[num])
else:
break
for num in range(1, len(layout.column_aliases)):
if layout.column_aliases[num - 1] in already_filtered_on:
filterable.add(layout.column_aliases[num])
else:
break
for cd in layout.columns:
if cd.index_name is not None:
filterable.add(cd.name)
return map(maybe_escape_name, filterable)
@completer_for('selectClause', 'star')
def select_count_star_completer(ctxt, cass):
return ['*']
explain_completion('selector', 'colname')
syntax_rules += r'''
<insertStatement> ::= "INSERT" "INTO" cf=<columnFamilyName>
"(" [colname]=<cident> "," [colname]=<cident>
( "," [colname]=<cident> )* ")"
"VALUES" "(" [newval]=<value> valcomma="," [newval]=<value>
( valcomma="," [newval]=<value> )* valcomma=")"
( "USING" [insertopt]=<usingOption>
( "AND" [insertopt]=<usingOption> )* )?
;
<usingOption> ::= "TIMESTAMP" <wholenumber>
| "TTL" <wholenumber>
;
'''
@completer_for('insertStatement', 'colname')
def insert_colname_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
colnames = set(map(dequote_name, ctxt.get_binding('colname', ())))
keycols = layout.primary_key_components
for k in keycols:
if k not in colnames:
return [maybe_escape_name(k)]
normalcols = set([c.name for c in layout.columns]) - set(keycols) - colnames
return map(maybe_escape_name, normalcols)
@completer_for('insertStatement', 'newval')
def insert_newval_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
insertcols = map(dequote_name, ctxt.get_binding('colname'))
valuesdone = ctxt.get_binding('newval', ())
if len(valuesdone) >= len(insertcols):
return []
curcol = insertcols[len(valuesdone)]
cqltype = layout.get_column(curcol).cqltype
coltype = cqltype.typename
if coltype in ('map', 'set'):
return ['{']
if coltype == 'list':
return ['[']
if coltype == 'boolean':
return ['true', 'false']
return [Hint('<value for %s (%s)>' % (maybe_escape_name(curcol),
cqltype.cql_parameterized_type()))]
@completer_for('insertStatement', 'valcomma')
def insert_valcomma_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
numcols = len(ctxt.get_binding('colname', ()))
numvals = len(ctxt.get_binding('newval', ()))
if numcols > numvals:
return [',']
return [')']
@completer_for('insertStatement', 'insertopt')
def insert_option_completer(ctxt, cass):
opts = set('TIMESTAMP TTL'.split())
for opt in ctxt.get_binding('insertopt', ()):
opts.discard(opt.split()[0])
return opts
syntax_rules += r'''
<updateStatement> ::= "UPDATE" cf=<columnFamilyName>
( "USING" [updateopt]=<usingOption>
( "AND" [updateopt]=<usingOption> )* )?
"SET" <assignment> ( "," <assignment> )*
"WHERE" <whereClause>
;
<assignment> ::= updatecol=<cident>
( "=" update_rhs=( <value> | <cident> )
( counterop=( "+" | "-" ) inc=<wholenumber>
| listadder="+" listcol=<cident> )
| indexbracket="[" <term> "]" "=" <term> )
;
'''
@completer_for('updateStatement', 'updateopt')
def update_option_completer(ctxt, cass):
opts = set('TIMESTAMP TTL'.split())
for opt in ctxt.get_binding('updateopt', ()):
opts.discard(opt.split()[0])
return opts
@completer_for('assignment', 'updatecol')
def update_col_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
normals = set([cm.name for cm in layout.columns]) \
- set(layout.primary_key_components)
return map(maybe_escape_name, normals)
@completer_for('assignment', 'update_rhs')
def update_countername_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
curcol = dequote_name(ctxt.get_binding('updatecol', ''))
cqltype = layout.get_column(curcol).cqltype
coltype = cqltype.typename
if coltype == 'counter':
return maybe_escape_name(curcol)
if coltype in ('map', 'set'):
return ["{"]
if coltype == 'list':
return ["["]
return [Hint('<term (%s)>' % cqltype.cql_parameterized_type())]
@completer_for('assignment', 'counterop')
def update_counterop_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
curcol = dequote_name(ctxt.get_binding('updatecol', ''))
return ['+', '-'] if layout.is_counter_col(curcol) else []
@completer_for('assignment', 'inc')
def update_counter_inc_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
curcol = dequote_name(ctxt.get_binding('updatecol', ''))
if layout.is_counter_col(curcol):
return [Hint('<wholenumber>')]
return []
@completer_for('assignment', 'listadder')
def update_listadder_completer(ctxt, cass):
rhs = ctxt.get_binding('update_rhs')
if rhs.startswith('['):
    return ['+']
return []
@completer_for('assignment', 'listcol')
def update_listcol_completer(ctxt, cass):
rhs = ctxt.get_binding('update_rhs')
if rhs.startswith('['):
colname = dequote_name(ctxt.get_binding('updatecol'))
return [maybe_escape_name(colname)]
return []
@completer_for('assignment', 'indexbracket')
def update_indexbracket_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
curcol = dequote_name(ctxt.get_binding('updatecol', ''))
coltype = layout.get_column(curcol).cqltype.typename
if coltype in ('map', 'list'):
return ['[']
return []
syntax_rules += r'''
<deleteStatement> ::= "DELETE" ( <deleteSelector> ( "," <deleteSelector> )* )?
"FROM" cf=<columnFamilyName>
( "USING" [delopt]=<deleteOption> ( "AND" [delopt]=<deleteOption> )* )?
"WHERE" <whereClause>
;
<deleteSelector> ::= delcol=<cident> ( memberbracket="[" memberselector=<term> "]" )?
;
<deleteOption> ::= "TIMESTAMP" <wholenumber>
;
'''
@completer_for('deleteStatement', 'delopt')
def delete_opt_completer(ctxt, cass):
opts = set('TIMESTAMP'.split())
for opt in ctxt.get_binding('delopt', ()):
opts.discard(opt.split()[0])
return opts
@completer_for('deleteSelector', 'delcol')
def delete_delcol_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
cols = set([c.name for c in layout.columns
if c.name not in layout.primary_key_components])
return map(maybe_escape_name, cols)
syntax_rules += r'''
<batchStatement> ::= "BEGIN" ( "UNLOGGED" | "COUNTER" )? "BATCH"
( "USING" [batchopt]=<usingOption>
( "AND" [batchopt]=<usingOption> )* )?
[batchstmt]=<batchStatementMember> ";"
( [batchstmt]=<batchStatementMember> ";" )*
"APPLY" "BATCH"
;
<batchStatementMember> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
;
'''
@completer_for('batchStatement', 'batchopt')
def batch_opt_completer(ctxt, cass):
opts = set('TIMESTAMP'.split())
for opt in ctxt.get_binding('batchopt', ()):
opts.discard(opt.split()[0])
return opts
syntax_rules += r'''
<truncateStatement> ::= "TRUNCATE" cf=<columnFamilyName>
;
'''
syntax_rules += r'''
<createKeyspaceStatement> ::= "CREATE" wat=( "KEYSPACE" | "SCHEMA" ) ksname=<cfOrKsName>
"WITH" <property> ( "AND" <property> )*
;
'''
@completer_for('createKeyspaceStatement', 'wat')
def create_ks_wat_completer(ctxt, cass):
# would prefer to get rid of the "schema" nomenclature in cql3
if ctxt.get_binding('partial', '') == '':
return ['KEYSPACE']
return ['KEYSPACE', 'SCHEMA']
@completer_for('oldPropSpec', 'optname')
def create_ks_opt_completer(ctxt, cass):
exist_opts = ctxt.get_binding('optname', ())
try:
stratopt = exist_opts.index('strategy_class')
except ValueError:
return ['strategy_class =']
vals = ctxt.get_binding('optval')
stratclass = dequote_value(vals[stratopt])
if stratclass in CqlRuleSet.replication_factor_strategies:
return ['strategy_options:replication_factor =']
return [Hint('<strategy_option_name>')]
@completer_for('oldPropSpec', 'optval')
def create_ks_optval_completer(ctxt, cass):
exist_opts = ctxt.get_binding('optname', (None,))
if exist_opts[-1] == 'strategy_class':
return map(escape_value, CqlRuleSet.replication_strategies)
return [Hint('<option_value>')]
@completer_for('newPropSpec', 'propname')
def keyspace_properties_option_name_completer(ctxt, cass):
optsseen = ctxt.get_binding('propname', ())
if 'replication' not in optsseen:
return ['replication']
return ["durable_writes"]
@completer_for('propertyValue', 'propsimpleval')
def property_value_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname == 'durable_writes':
return ["'true'", "'false'"]
if optname == 'replication':
return ["{'class': '"]
return ()
@completer_for('propertyValue', 'propmapkey')
def keyspace_properties_map_key_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname != 'replication':
return ()
keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
for k, v in zip(keysseen, valsseen):
if k == 'class':
repclass = v
break
else:
return ["'class'"]
if repclass in CqlRuleSet.replication_factor_strategies:
    opts = set(('replication_factor',))
    return map(escape_value, opts.difference(keysseen))
if repclass == 'NetworkTopologyStrategy':
    return [Hint('<dc_name>')]
return ()
@completer_for('propertyValue', 'propmapval')
def keyspace_properties_map_value_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname != 'replication':
return ()
currentkey = dequote_value(ctxt.get_binding('propmapkey')[-1])
if currentkey == 'class':
return map(escape_value, CqlRuleSet.replication_strategies)
return [Hint('<value>')]
@completer_for('propertyValue', 'ender')
def keyspace_properties_map_ender_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname != 'replication':
return [',']
keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
for k, v in zip(keysseen, valsseen):
if k == 'class':
repclass = v
break
else:
return [',']
if repclass in CqlRuleSet.replication_factor_strategies:
opts = set(('replication_factor',))
if 'replication_factor' not in keysseen:
return [',']
if repclass == 'NetworkTopologyStrategy' and len(keysseen) == 1:
return [',']
return ['}']
syntax_rules += r'''
<createColumnFamilyStatement> ::= "CREATE" wat=( "COLUMNFAMILY" | "TABLE" )
( ks=<nonSystemKeyspaceName> dot="." )? cf=<cfOrKsName>
"(" ( <singleKeyCfSpec> | <compositeKeyCfSpec> ) ")"
( "WITH" <cfamProperty> ( "AND" <cfamProperty> )* )?
;
<cfamProperty> ::= <property>
| "COMPACT" "STORAGE"
| "CLUSTERING" "ORDER" "BY" "(" <cfamOrdering>
( "," <cfamOrdering> )* ")"
;
<cfamOrdering> ::= [ordercol]=<cident> ( "ASC" | "DESC" )
;
<singleKeyCfSpec> ::= [newcolname]=<cident> <simpleStorageType> "PRIMARY" "KEY"
( "," [newcolname]=<cident> <storageType> )*
;
<compositeKeyCfSpec> ::= [newcolname]=<cident> <simpleStorageType>
"," [newcolname]=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )*
"," "PRIMARY" k="KEY" p="(" ( partkey=<pkDef> | [pkey]=<cident> )
( c="," [pkey]=<cident> )* ")"
;
<pkDef> ::= "(" [ptkey]=<cident> "," [ptkey]=<cident>
( "," [ptkey]=<cident> )* ")"
;
'''
@completer_for('cfamOrdering', 'ordercol')
def create_cf_clustering_order_colname_completer(ctxt, cass):
colnames = map(dequote_name, ctxt.get_binding('newcolname', ()))
# Definitely some of these aren't valid for ordering, but I'm not sure
# precisely which are. This is good enough for now
return colnames
@completer_for('createColumnFamilyStatement', 'wat')
def create_cf_wat_completer(ctxt, cass):
# would prefer to get rid of the "columnfamily" nomenclature in cql3
if ctxt.get_binding('partial', '') == '':
return ['TABLE']
return ['TABLE', 'COLUMNFAMILY']
explain_completion('createColumnFamilyStatement', 'cf', '<new_table_name>')
explain_completion('compositeKeyCfSpec', 'newcolname', '<new_column_name>')
@completer_for('createColumnFamilyStatement', 'dot')
def create_cf_ks_dot_completer(ctxt, cass):
ks = dequote_name(ctxt.get_binding('ks'))
if ks in cass.get_keyspace_names():
return ['.']
return []
@completer_for('pkDef', 'ptkey')
def create_cf_pkdef_declaration_completer(ctxt, cass):
cols_declared = ctxt.get_binding('newcolname')
pieces_already = ctxt.get_binding('ptkey', ())
pieces_already = map(dequote_name, pieces_already)
while cols_declared[0] in pieces_already:
cols_declared = cols_declared[1:]
if len(cols_declared) < 2:
return ()
return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'pkey')
def create_cf_composite_key_declaration_completer(ctxt, cass):
cols_declared = ctxt.get_binding('newcolname')
pieces_already = ctxt.get_binding('ptkey', ()) + ctxt.get_binding('pkey', ())
pieces_already = map(dequote_name, pieces_already)
while cols_declared[0] in pieces_already:
cols_declared = cols_declared[1:]
if len(cols_declared) < 2:
return ()
return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'k')
def create_cf_composite_primary_key_keyword_completer(ctxt, cass):
return ['KEY (']
@completer_for('compositeKeyCfSpec', 'p')
def create_cf_composite_primary_key_paren_completer(ctxt, cass):
return ['(']
@completer_for('compositeKeyCfSpec', 'c')
def create_cf_composite_primary_key_comma_completer(ctxt, cass):
cols_declared = ctxt.get_binding('newcolname')
pieces_already = ctxt.get_binding('pkey', ())
if len(pieces_already) >= len(cols_declared) - 1:
return ()
return [',']
syntax_rules += r'''
<createIndexStatement> ::= "CREATE" "INDEX" indexname=<identifier>? "ON"
cf=<columnFamilyName> "(" col=<cident> ")"
;
'''
explain_completion('createIndexStatement', 'indexname', '<new_index_name>')
@completer_for('createIndexStatement', 'col')
def create_index_col_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
colnames = [cd.name for cd in layout.columns if cd.index_name is None]
return map(maybe_escape_name, colnames)
syntax_rules += r'''
<dropKeyspaceStatement> ::= "DROP" "KEYSPACE" ksname=<nonSystemKeyspaceName>
;
<dropColumnFamilyStatement> ::= "DROP" ( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
;
<dropIndexStatement> ::= "DROP" "INDEX" indexname=<identifier>
;
'''
@completer_for('dropIndexStatement', 'indexname')
def drop_index_completer(ctxt, cass):
return map(maybe_escape_name, cass.get_index_names())
syntax_rules += r'''
<alterTableStatement> ::= "ALTER" ( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
<alterInstructions>
;
<alterInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType>
| "DROP" existcol=<cident>
| "WITH" <cfamProperty> ( "AND" <cfamProperty> )*
;
'''
@completer_for('alterInstructions', 'existcol')
def alter_table_col_completer(ctxt, cass):
layout = get_cf_layout(ctxt, cass)
cols = [md.name for md in layout.columns]
return map(maybe_escape_name, cols)
explain_completion('alterInstructions', 'newcol', '<new_column_name>')
syntax_rules += r'''
<alterKeyspaceStatement> ::= "ALTER" ( "KEYSPACE" | "SCHEMA" ) ks=<alterableKeyspaceName>
"WITH" <newPropSpec> ( "AND" <newPropSpec> )*
;
'''
syntax_rules += r'''
<username> ::= name=( <identifier> | <stringLiteral> )
;
<createUserStatement> ::= "CREATE" "USER" <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<alterUserStatement> ::= "ALTER" "USER" <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<dropUserStatement> ::= "DROP" "USER" <username>
;
<listUsersStatement> ::= "LIST" "USERS"
;
'''
syntax_rules += r'''
<grantStatement> ::= "GRANT" <permissionExpr> "ON" <resource> "TO" <username>
;
<revokeStatement> ::= "REVOKE" <permissionExpr> "ON" <resource> "FROM" <username>
;
<listPermissionsStatement> ::= "LIST" <permissionExpr>
( "ON" <resource> )? ( "OF" <username> )? "NORECURSIVE"?
;
<permission> ::= "AUTHORIZE"
| "CREATE"
| "ALTER"
| "DROP"
| "SELECT"
| "MODIFY"
;
<permissionExpr> ::= ( <permission> "PERMISSION"? )
| ( "ALL" "PERMISSIONS"? )
;
<resource> ::= <dataResource>
;
<dataResource> ::= ( "ALL" "KEYSPACES" )
| ( "KEYSPACE" <keyspaceName> )
| ( "TABLE"? <columnFamilyName> )
;
'''
@completer_for('username', 'name')
def username_name_completer(ctxt, cass):
def maybe_quote(name):
if CqlRuleSet.is_valid_cql3_name(name):
return name
return "'%s'" % name
# disable completion for CREATE USER.
if ctxt.matched[0][0] == 'K_CREATE':
return [Hint('<username>')]
cursor = cass.conn.cursor()
cursor.execute("LIST USERS")
return [maybe_quote(row[0].replace("'", "''")) for row in cursor.fetchall()]
# END SYNTAX/COMPLETION RULE DEFINITIONS
CqlRuleSet.append_rules(syntax_rules)
# current assumption is that all valid CQL tables match the rules in the
# following table.
#
# non-empty non-empty multiple composite
# value_alias column_aliases key_aliases comparator
# ---------------------+----------------------------------------------------
# A: single-column PK, |
# compact storage | either no no no
# ---------------------+----------------------------------------------------
# B: single-column PK, |
# dynamic storage | no no no yes
# ---------------------+----------------------------------------------------
# C: compound PK, |
# plain part. key, | yes[1] yes no either
# compact storage |
# ---------------------+----------------------------------------------------
# D: compound PK, |
# plain part. key, | no yes no yes
# dynamic storage |
# ---------------------+----------------------------------------------------
# E: compound PK, |
# multipart part. key, |
# all key components | either no yes no
# go in part. key, |
# compact storage |
# ---------------------+----------------------------------------------------
# F: compound PK, |
# multipart part. key, |
# all key components | no no yes yes
# go in part. key, |
# dynamic storage |
# ---------------------+----------------------------------------------------
# G: compound PK, |
# multipart part. key, |
# some key components | yes[1] yes yes either
# not in part. key, |
# compact storage |
# ---------------------+----------------------------------------------------
# H: compound PK, |
# multipart part. key, |
# some key components | no yes yes yes
# not in part. key, |
# dynamic storage |
# ---------------------+----------------------------------------------------
#
# [1] the value_alias may be blank, but not null.
# for compact storage:
#
# if no column aliases:
# comparator will be UTF8Type
# elif one column alias:
# comparator will be type of that column
# else:
# comparator will be composite of types of all column_aliases
#
# for dynamic storage:
#
# comparator is composite of types of column_aliases, followed by UTF8Type,
# followed by one CTCT if there are collections.
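
# Illustrative sketch of the compact-storage comparator rule spelled out in
# the comment above, using plain strings instead of cql.cqltypes classes.
# It is a hypothetical helper for orientation only and is not used below.
def _expected_compact_comparator(column_alias_typenames):
    if not column_alias_typenames:
        return 'UTF8Type'
    if len(column_alias_typenames) == 1:
        return column_alias_typenames[0]
    return 'CompositeType(%s)' % ', '.join(column_alias_typenames)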
class CqlColumnDef:
index_name = None
def __init__(self, name, cqltype):
self.name = name
self.cqltype = cqltype
assert name is not None
@classmethod
def from_layout(cls, layout):
try:
colname = layout[u'column_name']
except KeyError:
colname = layout[u'column']
c = cls(colname, lookup_casstype(layout[u'validator']))
c.index_name = layout[u'index_name']
return c
def __str__(self):
indexstr = ' (index %s)' % self.index_name if self.index_name is not None else ''
return '<CqlColumnDef %r %r%s>' % (self.name, self.cqltype, indexstr)
__repr__ = __str__
class CqlTableDef:
json_attrs = ('column_aliases', 'compaction_strategy_options', 'compression_parameters',
'key_aliases')
colname_type = UTF8Type
column_class = CqlColumnDef
"""True if this CF has compact storage (isn't a CQL3 table)"""
compact_storage = False
"""Names of all columns which are part of the primary key, whether or not
they are grouped into the partition key"""
primary_key_components = ()
"""Names of all columns which are grouped into the partition key"""
partition_key_components = ()
"""Names of all columns which are part of the primary key, but not grouped
into the partition key"""
column_aliases = ()
"""CqlColumnDef objects for all columns. Use .get_column() to access one
by name."""
columns = ()
def __init__(self, name):
self.name = name
@classmethod
def from_layout(cls, layout, coldefs):
"""
This constructor accepts a dictionary of column-value pairs from a row
of system.schema_columnfamilies, and a sequence of similar dictionaries
from corresponding rows in system.schema_columns.
"""
try:
cfname = layout[u'columnfamily_name']
ksname = layout[u'keyspace_name']
except KeyError:
cfname = layout[u'columnfamily']
ksname = layout[u'keyspace']
cf = cls(name=cfname)
for attr, val in layout.items():
setattr(cf, attr.encode('ascii'), val)
cf.keyspace = ksname
for attr in cls.json_attrs:
try:
val = getattr(cf, attr)
# cfs created in 1.1 may not have key_aliases defined
if attr == 'key_aliases' and val is None:
val = '[]'
setattr(cf, attr, json.loads(val))
except AttributeError:
pass
cf.partition_key_validator = lookup_casstype(cf.key_validator)
cf.comparator = lookup_casstype(cf.comparator)
cf.default_validator = lookup_casstype(cf.default_validator)
cf.coldefs = coldefs
cf.compact_storage = cf.is_compact_storage()
cf.key_aliases = cf.get_key_aliases()
cf.partition_key_components = cf.key_aliases
cf.column_aliases = cf.get_column_aliases()
cf.primary_key_components = cf.key_aliases + list(cf.column_aliases)
cf.columns = cf.get_columns()
return cf
# not perfect, but good enough; please read CFDefinition constructor comments
# returns False if we are dealing with a CQL3 table, True otherwise.
# 'compact' here means 'needs WITH COMPACT STORAGE option for CREATE TABLE in CQL3'.
def is_compact_storage(self):
if not issubclass(self.comparator, CompositeType):
return True
for subtype in self.comparator.subtypes:
if issubclass(subtype, ColumnToCollectionType):
return False
if len(self.column_aliases) == len(self.comparator.subtypes) - 1:
if self.comparator.subtypes[-1] is UTF8Type:
return False
return True
def get_key_aliases(self):
if not issubclass(self.partition_key_validator, CompositeType):
return self.key_aliases or (self.key_alias and [self.key_alias]) or [u'key']
expected = len(self.partition_key_validator.subtypes)
# key, key2, key3, ..., keyN
aliases = [u'key'] + [ u'key' + str(i) for i in range(2, expected + 1) ]
# append the missing (non-renamed) aliases (if any)
return self.key_aliases + aliases[len(self.key_aliases):]
def get_column_aliases(self):
# CQL3 table
if not self.compact_storage:
return self.column_aliases
if not issubclass(self.comparator, CompositeType):
# static cf
if self.coldefs:
return []
else:
return self.column_aliases or [u'column1']
expected = len(self.comparator.subtypes)
# column1, column2, column3, ..., columnN
aliases = [ u'column' + str(i) for i in range(1, expected + 1) ]
# append the missing (non-renamed) aliases (if any)
return self.column_aliases + aliases[len(self.column_aliases):]
def get_columns(self):
if self.compact_storage:
return self.get_columns_compact()
else:
return self.get_columns_cql3()
# dense composite or dynamic cf or static cf (technically not compact).
def get_columns_compact(self):
if issubclass(self.partition_key_validator, CompositeType):
partkey_types = self.partition_key_validator.subtypes
else:
partkey_types = [self.partition_key_validator]
partkey_cols = map(self.column_class, self.partition_key_components, partkey_types)
if len(self.column_aliases) == 0:
if self.comparator is not UTF8Type:
warn(UnexpectedTableStructure("Compact storage CF %s has no column aliases,"
" but comparator is not UTF8Type." % (self.name,)))
colalias_types = []
elif issubclass(self.comparator, CompositeType):
colalias_types = self.comparator.subtypes
else:
colalias_types = [self.comparator]
if len(colalias_types) != len(self.column_aliases):
warn(UnexpectedTableStructure("Compact storage CF comparator-types %r is not"
" the same length as its column_aliases %r"
% (colalias_types, self.column_aliases)))
colalias_cols = map(self.column_class, self.column_aliases, colalias_types)
if self.value_alias is not None:
if self.coldefs:
warn(UnexpectedTableStructure("Compact storage CF has both a value_alias"
" (%r) and entries in system.schema_columns"
% (self.value_alias,)))
if self.value_alias == '':
value_cols = []
else:
value_cols = [self.column_class(self.value_alias, self.default_validator)]
elif self.value_alias is None and not self.coldefs:
value_cols = [self.column_class("value", self.default_validator)]
else:
value_cols = map(self.column_class.from_layout, self.coldefs)
value_cols.sort(key=lambda c: c.name)
return partkey_cols + colalias_cols + value_cols
# sparse composite (CQL3 table).
def get_columns_cql3(self):
if issubclass(self.partition_key_validator, CompositeType):
partkey_types = self.partition_key_validator.subtypes
else:
partkey_types = [self.partition_key_validator]
partkey_cols = map(self.column_class, self.partition_key_components, partkey_types)
for subtype in self.comparator.subtypes[:-1]:
if issubclass(subtype, ColumnToCollectionType):
warn(UnexpectedTableStructure("ColumnToCollectionType found, but not in "
"last position inside composite comparator"))
coltypes = list(self.comparator.subtypes)
if issubclass(coltypes[-1], ColumnToCollectionType):
# all this information should be available in schema_columns
coltypes.pop(-1)
if len(coltypes) != len(self.column_aliases) + 1 or coltypes[-1] is not UTF8Type:
warn(UnexpectedTableStructure("CQL3 CF does not have UTF8Type"
" added to comparator"))
colalias_cols = map(self.column_class, self.column_aliases, coltypes[:-1])
if self.value_alias is not None:
warn(UnexpectedTableStructure("CQL3 CF has a value_alias (%r)"
% (self.value_alias,)))
value_cols = map(self.column_class.from_layout, self.coldefs)
value_cols.sort(key=lambda c: c.name)
return partkey_cols + colalias_cols + value_cols
def is_counter_col(self, colname):
try:
return bool(self.get_column(colname).cqltype is CounterColumnType)
except KeyError:
return False
def get_column(self, colname):
col_info = [cm for cm in self.columns if cm.name == colname]
if not col_info:
raise KeyError("column %r not found" % (colname,))
return col_info[0]
def __str__(self):
return '<%s %s.%s>' % (self.__class__.__name__, self.keyspace, self.name)
__repr__ = __str__
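
# Minimal illustration of CqlColumnDef.from_layout with a hypothetical row
# shaped like system.schema_columns; the validator is the fully qualified
# class name that lookup_casstype is assumed to resolve, as it does for rows
# read from the real schema tables. Defined as a function so nothing runs at
# import time.
def _example_column_def():
    row = {u'column_name': u'body',
           u'validator': u'org.apache.cassandra.db.marshal.UTF8Type',
           u'index_name': None}
    return CqlColumnDef.from_layout(row)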
|
|
# Copyright (c) 2015, Aaron Karper <[email protected]>
from __future__ import print_function
import random
import os
import functools
import itertools
import sys
from pprint import pprint
class Arbitrary(object):
"""Environment for the running of randomized tests.
"""
def genOrMake(self, f):
if callable(f):
return f(self)
return f
def __init__(self, seed=None, size=None, verbose=False):
"""Initializes the random generator."""
seed = seed or os.environ.get('QC_SEED')
self.random = random.Random(x=seed)
self.size = size or 256
self.verbose = verbose or 'QC_VERBOSE' in os.environ
def integers(self):
"""Stream of integers from -2**-size to 2**size
"""
while True:
size = 2**self.random.randint(0, self.size)
yield self.random.randint(-size, size)
def non_negative(self):
"""Stream of integers from 0 to 2**size
"""
while True:
size = 2**self.random.randint(0, self.size)
yield self.random.randint(0, size)
def floats(self):
"""Stream of floats from -2**-size to 2**size
"""
while True:
size = 2**self.random.randint(0, self.size)
yield self.random.uniform(-size, size)
def lists(self, items=integers):
"""Stream of random lists up to len size.
"""
gen = self.genOrMake(items)
while True:
    size = self.random.randint(0, self.size)
    yield [next(gen) for _ in xrange(size)]
def tuples(self, items=integers):
"""Stream of random tuples up to len size.
"""
return itertools.imap(tuple, self.lists(items))
def key_value_generator(self, keys=integers, values=integers):
keys_i = self.genOrMake(keys)
vals_i = self.genOrMake(values)
while True:
yield (next(keys_i), next(vals_i))
def dicts(self, key_values=key_value_generator, keys=None, values=None):
"""Stream of random dicts up to len size.
"""
if keys is not None and values is not None:
    key_i, val_i = self.genOrMake(keys), self.genOrMake(values)
    key_values = itertools.izip(key_i, val_i)
# draw (key, value) pairs directly; going through lists() would hand dict()
# whole lists of pairs instead of 2-tuples
pairs = self.genOrMake(key_values)
while True:
    size = self.random.randint(0, self.size)
    yield dict(next(pairs) for _ in xrange(size))
def unicode_chars(self, min=0, max=512):
"""Stream of random unicode characters
"""
# TODO: Make more elaborate generator
while True:
yield unichr(self.random.randint(min, max))
def chars(self, min=0, max=255):
"""Stream of random characters
"""
while True:
yield chr(self.random.randint(min, max))
def unicodes(self, minunicode=0, maxunicode=512):
"""Stream of random unicode strings
"""
chars = self.unicode_chars(minunicode, maxunicode)
while True:
    size = self.random.randint(0, self.size)
    yield u''.join(next(chars) for _ in xrange(size))
def strings(self, min=0, max=255):
"""Stream of random strings
"""
chars = self.chars(min, max)
while True:
    size = self.random.randint(0, self.size)
    yield ''.join(next(chars) for _ in xrange(size))
def objects(self, _object_class, _fields={}, *init_args, **init_kwargs):
"""Stream of random objects with attributes from dict and constructor
arguments.
"""
init_args = [self.genOrMake(f) for f in init_args]
init_kwargs = dict((k, self.genOrMake(f))
for k, f in init_kwargs.iteritems())
_fields = dict((k, self.genOrMake(f))
for k, f in _fields.iteritems())
while True:
ctor_args = [next(arg) for arg in init_args]
ctor_kwargs = dict((k, next(v)) for k, v in init_kwargs.iteritems())
obj = _object_class(*ctor_args, **ctor_kwargs)
for k, v in _fields.iteritems():
setattr(obj, k, next(v))
yield obj
def forall(self, tries=100, size=None, seed=None, **kwargs):
"""Decorator for tests to feed randomized arguments.
"""
self.size = size or self.size  # keep the current size when none is given
self.seed = seed
self.random = random.Random(x=seed)
def wrap(f):
@functools.wraps(f)
def wrapped(*inargs, **inkwargs):
for _ in xrange(tries):
random_kwargs = dict(inkwargs)
for name, gen in kwargs.iteritems():
random_kwargs[name] = next(self.genOrMake(gen))
try:
if self.verbose:
pprint(random_kwargs)
f(*inargs, **random_kwargs)
except:
print("Counter example:", file=sys.stderr)
pprint(random_kwargs, stream=sys.stderr)
raise
return wrapped
return wrap
DEFAULT = Arbitrary()
def get_first_or_default(args):
if not args:
return DEFAULT, args
if isinstance(args[0], Arbitrary):
return args[0], args[1:]
else:
return DEFAULT, args
def integers(*args, **kwargs):
self, args = get_first_or_default(args)
return self.integers(*args, **kwargs)
def non_negative(*args, **kwargs):
self, args = get_first_or_default(args)
return self.non_negative(*args, **kwargs)
def floats(*args, **kwargs):
self, args = get_first_or_default(args)
return self.floats(*args, **kwargs)
def lists(*args, **kwargs):
self, args = get_first_or_default(args)
return self.lists(*args, **kwargs)
def tuples(*args, **kwargs):
self, args = get_first_or_default(args)
return self.tuples(*args, **kwargs)
def unicode_chars(*args, **kwargs):
self, args = get_first_or_default(args)
return self.unicode_chars(*args, **kwargs)
def chars(*args, **kwargs):
self, args = get_first_or_default(args)
return self.chars(*args, **kwargs)
def unicodes(*args, **kwargs):
self, args = get_first_or_default(args)
return self.unicodes(*args, **kwargs)
def strings(*args, **kwargs):
self, args = get_first_or_default(args)
return self.strings(*args, **kwargs)
def objects(*args, **kwargs):
self, args = get_first_or_default(args)
return self.objects(*args, **kwargs)
def forall(*args, **kwargs):
self, args = get_first_or_default(args)
return self.forall(*args, **kwargs)
__all__ = ['integers', 'non_negative', 'floats', 'lists', 'tuples',
           'unicode_chars', 'chars', 'unicodes', 'strings',
           'objects', 'forall', 'Arbitrary']
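
# A minimal usage sketch (illustrative, not part of the original module):
# forall() draws each keyword argument from the given generator stream and
# re-raises on the first failing case after printing it as a counterexample.
# QC_SEED / QC_VERBOSE may be set in the environment, per Arbitrary.__init__.
if __name__ == '__main__':
    @forall(tries=50, x=integers(), xs=lists())
    def test_sum_is_commutative(x, xs):
        assert sum([x] + xs) == sum(xs + [x])

    test_sum_is_commutative()
    print('randomized checks passed')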
|
|
from PIL import Image
from hotspotter.other.AbstractPrintable import AbstractDataManager
from hotspotter.other.logger import logmsg, logdbg, logerr, logio, logwarn
import numpy as np
import os.path
import pylab
import re
import types
import hotspotter.ChipFunctions
# The ChipManager handles the chips. This entails managing:
#   * the chips directory
#   * image chips
#   * feature representations
class ChipManager(AbstractDataManager):
'''The chip manager maintains chip information including
feature representations, name, image, extent, and property
information.'''
# --- CID CONVENIENCE FUNCTIONS ---
def is_valid(cm, cid):
#if np.iterable(cid):
#return all([cm.is_valid(cid_) for cid_ in cid])
return cid < len(cm.cid2_cx) and cid >= 0 and cm.cid2_cx[cid] > 0
def iscx_valid(cm, cx):
return cx < len(cm.cx2_cid) and cx >= 0 and cm.cx2_cid[cx] > 0
def cid(cm, cx):
'maps cx to cid with error checks'
if not cm.iscx_valid(cx):
logerr('CX=%s is invalid' % str(cx))
return cm.cx2_cid[cx]
def cx(cm, cid):
'maps cid to cx with error checks'
if not cm.is_valid(cid):
logerr('CID=%s is invalid' % str(cid))
return cm.cid2_cx[cid]
def gid(cm, cid):
return cm.cx2_gid(cm.cx(cid))
def info(cm, cid_list=None, lbls=None):
return cm.cx2_info(cm.cx(cid_list), lbls)
# TODO: info and cx2_ and cx2_dynget should merge. A standard should be chosen
def cx2_dynget(cm, cx_list, *dynargs):
return cm.cx2_(cx_list, *dynargs)
def cx2_(cm, cx_list, *dynargs):
'Request chip data: convenience function to get many properties at once'
#logdbg('Requested Data: %s of CX= %s' % (str(dynargs), str(cx_list)))
to_return = []
cid = cm.cx2_cid[cx_list]
invalid_x = pylab.find(cid == 0)
if len(invalid_x) > 0:
logerr('Requested invalid cxs: '+str(cx_list[invalid_x]))
for arg in dynargs:
if arg == 'cx':
to_return.append(cx_list)
elif arg == 'cid':
to_return.append(cm.cx2_cid[cx_list])
elif arg == 'nid':
to_return.append(cm.cx2_nid(cx_list))
elif arg == 'gid':
to_return.append(cm.cx2_gid(cx_list))
elif arg == 'chip':
to_return.append(cm.cx2_chip(cx_list))
elif arg == 'name':
to_return.append(cm.cx2_name(cx_list))
elif arg == 'gname':
to_return.append(cm.cx2_gname(cx_list))
else:
to_return.append('__UNFILLED__') # mark unfilled requests
return to_return
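    # Hypothetical usage sketch of the dynamic getter above (property names are the
    # ones handled by the branches in cx2_; anything it does not recognize comes
    # back as '__UNFILLED__'):
    #   cids, names, gnames = cm.cx2_(cx_list, 'cid', 'name', 'gname')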
def cid2_(cm, cid_list, *dynargs):
        'Convenience wrapper taking cids instead of cxs'
cx_list = cm.cx(cid_list)
return cm.cx2_(cx_list, *dynargs)
# --- ACTUAL WORK FUNCTIONS
def __init__(cm,hs):
super( ChipManager, cm ).__init__( hs )
# --- Table Info ---
cm.cx2_cid = np.empty(0, dtype=np.uint32) # index to Chip id
cm.cx2_nx = np.empty(0, dtype=np.uint32) # index to Name id
cm.cx2_gx = np.empty(0, dtype=np.uint32) # index to imaGe id
cm.cx2_roi = np.empty((0,4), dtype=object) # (x,y,w,h)
cm.cx2_theta = np.empty(0, dtype=np.float32) # roi orientation
# --- Feature Representation of Chip ---
        cm.cx2_fpts = np.empty(0, dtype=object) # hessian-affine keypoints
        cm.cx2_fdsc = np.empty(0, dtype=object) # Root SIFT descriptors
cm.cx2_dirty_bit = np.empty(0, dtype=np.bool) # Dirty bit flag (need to recompute)
# --- Reverse Index --
cm.cid2_cx = np.array([], dtype=np.uint32)
# --- Book Keeping --
cm.next_cx = 1 # the next indeX we are going to use
cm.next_cid = 1 # the next ID we are going to use
cm.num_c = 0 # number of chips.
cm.max_cx = 0 # the largest cx seen
cm.max_cid = 0 # the largest cid seen
cm.max_roi = [0,0,0,0]
cm.x2_lbl = \
{
'cid' : lambda _: cm.cx2_cid[_],\
'nid' : lambda _: cm.cx2_nid(_),\
'gid' : lambda _: cm.cx2_gid(_),\
'gname': lambda _: cm.cx2_gname(_),\
'name' : lambda _: cm.cx2_name(_),\
'roi' : lambda _: cm.cx2_roi[_],\
'theta': lambda _: cm.cx2_theta[_],\
'cx' : lambda _: _ ,\
'nx' : lambda _: cm.cx2_nx[_] ,\
'gx' : lambda _: cm.cx2_gx[_] ,\
}
cm.user_props = {}
cm.default_fields = ['cid','gid','nid','roi','theta']
def add_user_prop(cm, new_prop):
if not new_prop in cm.user_props.keys():
if ',' in new_prop or '\n' in new_prop:
logerr('Properties cannot have commas or newlines')
return False
# Allocate data for property
# TODO: User prop must be a string
cm.user_props[new_prop] = np.empty(len(cm.cx2_cid),dtype=object)
# Add property to label map
cm.x2_lbl[new_prop] = lambda _: cm.user_props[new_prop][_]
for cx in iter(cm.get_valid_cxs()):
cm.user_props[new_prop][cx] = ''
def load_csv_line(cm, csv_data, csv_headers):
if csv_headers is None:
csv_headers = cm.default_fields
num_unspecified = len(csv_headers) - len(csv_data)
if num_unspecified != 0:
csv_data += ['' for _ in xrange(num_unspecified)]
unspecified_type = ['data','headers'][num_unspecified > 0]
logwarn(('\n\nIn chip_file: %d unspecified %s\n'+\
'csv_headers=%r\n'+\
'csv_data=%r\n\n')\
% (abs(num_unspecified), unspecified_type, csv_headers, csv_data))
# Build field name -> field value map
dmap = {k:v for (k,v) in zip(csv_headers,csv_data)}
if cm.hs.core_prefs.legacy_bit:
# Legacy: Be Backwards Compatible
if 'imgindex' in dmap.keys():
logwarn('Found imgindex')
imgindex = int(dmap['imgindex'])
gname = 'img-%07d.jpg' % imgindex
cm.hs.gm.add_img(int(imgindex), gname, False)
dmap['gid'] = imgindex
dmap['cid'] = imgindex
del dmap['imgindex']
if 'animal_name' in dmap.keys():
logwarn('Found animal_name')
dmap['nid'] = cm.hs.nm.add_name(-1, dmap['animal_name'])
del dmap['animal_name']
if 'instance_id' in dmap.keys():
dmap['cid'] = dmap['instance_id']
del dmap['instance_id']
if 'image_id' in dmap.keys():
dmap['gid'] = dmap['image_id']
del dmap['image_id']
if 'name_id' in dmap.keys():
dmap['nid'] = dmap['name_id']
del dmap['name_id']
# Read IDs
cid = int(dmap['cid']); del dmap['cid']
gid = int(dmap['gid']); del dmap['gid']
nid = int(dmap['nid']); del dmap['nid']
# Read Theta
try:
theta = np.float32(dmap['theta'])
del dmap['theta']
except KeyError as ex:
theta = 0
# Read ROI
roi_str = re.sub(' *',' ', dmap['roi'].replace(']','').replace('[','')).strip(' ').rstrip()
roi = map(lambda x: int(round(float(x))),roi_str.split(' '))
del dmap['roi']
# Read User Props, whatever is left in dmap
props = dmap
nx = cm.hs.nm.nid2_nx[nid]
gx = cm.hs.gm.gid2_gx[gid]
if gx == 0 or nx == 0 or gid == 0 or nid == 0:
err_msg = 'Adding Chip: (cid=%d),(nid=%d,nx=%d),(gid=%d,gx=%d)' % (cid, nid, nx, gid, gx)
err_msg += '\nChip has invalid indexes. (Maybe you deleted an image from the images directory?) '
logwarn(err_msg)
cm.add_chip(cid, nx, gx, roi, theta, props=props, delete_prev=False)
    def get_csv_line(cm, headers):
        # TODO: incomplete stub; presumably meant to format a chip as a csv line
        cm.cx2_info(lbls=['cid', 'gid', 'nid', 'roi', 'theta'])
        pass
def chip_alloc(cm, nAlloc):
'Allocate room for nAlloc more chips'
logdbg('Allocating room for %d more chips' % nAlloc)
cm.cx2_cid = np.append(cm.cx2_cid, np.zeros(nAlloc,dtype=np.uint32))
# Implicit Data Local Identifiers
cm.cx2_nx = np.append(cm.cx2_nx, np.zeros(nAlloc,dtype=np.uint32))
cm.cx2_gx = np.append(cm.cx2_gx, np.zeros(nAlloc,dtype=np.uint32))
# Explicit Data
cm.cx2_roi = np.append(cm.cx2_roi, np.zeros((nAlloc,4),dtype=np.uint32), axis=0)
cm.cx2_theta = np.append(cm.cx2_theta, np.zeros(nAlloc,dtype=np.float32), axis=0)
cm.cx2_fpts = np.append(cm.cx2_fpts, np.empty(nAlloc,dtype=object))
cm.cx2_fdsc = np.append(cm.cx2_fdsc, np.empty(nAlloc,dtype=object))
# Feature Representation
cm.cx2_dirty_bit = np.append(cm.cx2_dirty_bit, np.ones(nAlloc,dtype=np.bool))
# Reverse Index
idAlloc = len(cm.cid2_cx) - len(cm.cx2_cid)
if idAlloc > 0:
cm.cid2_cx = np.append(cm.cid2_cx, np.zeros(idAlloc,dtype=np.uint32))
# User Properties
for prop in cm.user_props.iterkeys():
cm.user_props[prop] = np.append(cm.user_props[prop], np.empty(nAlloc,dtype=object))
def cx2_info(cm, cxs=None, lbls=None):
#returns info in formatted table
if cxs is None: cxs = cm.get_valid_cxs()
if lbls is None: lbls = cm.default_fields
if lbls == 'all': lbls = cm.default_fields + cm.user_props.keys()
data_table_str = cm.x2_info(cxs, lbls)
return '# ChipManager\n'+data_table_str
    # More convenience functions
def get_valid_cxs(cm):
return pylab.find(cm.cx2_cid > 0)
def get_invalid_cxs(cm):
return pylab.find(cm.cx2_cid == 0)
def invalid_cxs(cm):
        'deprecated'
return cm.get_invalid_cxs()
def cx2_num_other_chips(cm, cxs):
        'returns the number of other chips belonging to the same name'
return np.array(map(lambda x: len(x), cm.cx2_other_cxs(cxs)),dtype=np.uint32)
def cx2_name(cm, cxs):
nxs = cm.cx2_nx[cxs]
return cm.hs.nm.nx2_name[nxs]
def cx2_gname(cm, cxs):
gxs = cm.cx2_gx[cxs]
return cm.hs.gm.gx2_gname[gxs]
def cx2_img(cm, cx):
gx = cm.cx2_gx[cx]
return cm.hs.gm.gx2_img(gx)
def cx2_img_list(cm, cx_list):
gx_list = cm.cx2_gx[cx_list]
return cm.hs.gm.gx2_img_list(gx_list)
def cx2_nid(cm,cxs):
nxs = cm.cx2_nx[cxs]
return cm.hs.nm.nx2_nid[nxs]
def cx2_gid(cm,cxs):
gxs = cm.cx2_gx[cxs]
return cm.hs.gm.gx2_gid[gxs]
def cid2_nid(cm,cids):
cxs = cm.cid2_cx(cids)
return cm.cx2_nid[cxs]
def cx2_other_cxs(cm,cx_list):
nm = cm.hs.nm
nx_list = cm.cx2_nx[cx_list]
other_cxs = nm.nx2_cx_list[nx_list]
UNIDEN_NX = 1
return [ocx if nx != UNIDEN_NX else [] for (nx,ocx) in zip(nx_list, other_cxs)]
#return [nm.nx2_cx_list[nx] for nx in nx_list]
def all_cxs(cm):
return np.array(pylab.find(cm.cx2_cid > 0), dtype=np.uint32)
def cid2_valid_bit(cm,cids): # Tests if CID is managed.
if type(cids) is types.ListType:
# Check InverseIndex Overflow
valid_bit = np.array([id <= cm.max_cid for id in cids])
valid_cxs = [cm.cid2_cx[cid] for cid in cids[valid_bit]]
valid_bit[valid_bit] = [cx > 0 for cx in valid_cxs]
else: #Non-List Case
valid_bit = cm.max_cid > cids and cm.cid2_cx[cids] > 0
return valid_bit
# --- ACTUAL WORK FUNCTIONS
def add_chip(cm, cid, nx, gx, roi, theta, props={}, delete_prev=False):
nm = cm.hs.nm
gm = cm.hs.gm
# Fails if cid is not available; cid = -1 means pick for you
cx = -1
if cid < 0:
cid = cm.next_cid
else:
if cm.cid2_valid_bit(cid): #New CID must be invalid
logerr('CID Already in database Chip Not Added')
logerr('Offending String: (cid, nx, gx, [roi]) = (%d, %d, %d, %s)' % (cid, nx, gx, str(roi)))
cid = 0
return
#Manage Memory
cx = cm.next_cx
logdbg('''
Adding Chip =
( cid, nx, gx, [tl_x tl_y w h ])
( %4d, %4d, %4d, %s)
'''% (cid, nx, gx, str('[ %4.1f %4.1f %4.1f %4.1f ]' % tuple(roi))))
if cx >= len(cm.cx2_cid):
curr_alloc = len(cm.cx2_cid)
cm.chip_alloc((curr_alloc+1)*2+1)
# Add the information to the flat table
logdbg(' * Adding cx='+str(cx)+' to the tables')
if nx == 0 or gx == 0 or len(roi) != 4:
logerr('Chip information is invalid. Cannot add.')
if delete_prev:
cm.delete_computed_cid(cid)
cm.cx2_cid[cx] = cid
cm.cx2_nx [cx] = nx
cm.cx2_gx [cx] = gx
cm.cx2_roi[cx] = roi
cm.cx2_theta[cx] = theta
cm.max_roi = map(lambda (a,b): max(a,b), zip(cm.max_roi, roi))
# Add This Chip To Reverse Indexing
if cid >= len(cm.cid2_cx):
idAlloc = max(cid+1,len(cm.cid2_cx)*2 + 1)
logdbg('Allocating: '+str(idAlloc)+' more cids')
cm.cid2_cx = np.append(cm.cid2_cx, np.zeros(idAlloc,dtype=np.uint32))
cm.cid2_cx[cid] = cx
nm.nx2_cx_list[nx].append(cx)
gm.gx2_cx_list[gx].append(cx)
# Add user props
for key in cm.user_props.keys():
if not key in props.keys():
cm.user_props[key][cx] = ''
for key,val in props.iteritems():
cm.add_user_prop(key)
cm.user_props[key][cx] = val
# Increment Data Counters
cm.next_cx = max(cm.next_cx + 1, cx+1)
cm.next_cid = max(cm.next_cid+1, cid+1)
cm.max_cx = max(cm.max_cx, cx)
cm.max_cid = max(cm.max_cid, cid)
cm.num_c = cm.num_c + 1
cm.hs.vm.isDirty = True
return cid
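    # Hypothetical usage sketch (nx, gx and the roi values are placeholders): pass
    # cid=-1 to let the manager pick the next free chip id, an roi given as
    # [tl_x, tl_y, w, h] in pixels, and theta in radians.
    #   new_cid = cm.add_chip(-1, nx, gx, [10, 20, 100, 80], 0.0)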
def delete_computed_cid(cm, cid):
iom = cm.hs.iom
if np.iterable(cid): logerr('this function only works for a single cid')
logmsg('Removing CID=%d\'s computed files' % cid)
cid_fname_pattern = iom.get_chip_prefix(cid, [])+'*'
iom.remove_computed_files_with_pattern(cid_fname_pattern)
def remove_chip(cm, cx):
cx_list = [cx]
if type(cx) == types.ListType:
cx_list = cx
logdbg('Removing CXs '+str(cx_list))
for cx in cx_list:
# Remove data saved on disk and memory
cm.hs.on_cx_modified(cx)
cid = cm.cx2_cid[cx]
logmsg('Removing cid=%d' % cid)
            # Remove cx from other data managers
gx = cm.cx2_gx[cx]
nx = cm.cx2_nx[cx]
cm.hs.gm.gx2_cx_list[gx].remove(cx)
cm.hs.nm.nx2_cx_list[nx].remove(cx)
# Remove data saved in memory
cm.cx2_cid[cx] = 0
cm.cx2_nx[cx] = 0
cm.cx2_gx[cx] = 0
cm.cx2_roi[cx] = np.array([0,0,0,0],dtype=np.uint32)
cm.cx2_theta[cx] = 0
cm.cid2_cx[cid] = 0
def change_orientation(cm, cx, new_theta):
cid = cm.cx2_cid[cx]
logmsg('Giving cid=%d new theta: %r' % (cid, new_theta))
assert not new_theta is None
cm.hs.on_cx_modified(cx)
cm.cx2_theta[cx] = new_theta
def change_roi(cm, cx, new_roi):
cid = cm.cx2_cid[cx]
logmsg('Giving cid=%d new roi: %r' % (cid, new_roi))
        assert new_roi is not None
        if new_roi is None:
            logerr('The new ROI is empty')
cm.hs.on_cx_modified(cx)
cm.cx2_roi[cx] = new_roi
def rename_chip(cm, cx, new_name):
nm = cm.hs.nm
cid = cm.cid(cx)
old_nx = cm.cx2_nx[cx]
old_name = nm.nx2_name[old_nx]
if old_name == new_name:
logdbg('new_name == old_name')
return
logmsg('Renaming cid='+str(cid)+' from '+str(old_name)+' to '+new_name)
if not new_name in nm.name2_nx.keys():
nm.add_name(-1,new_name)
old_nx = cm.cx2_nx[cx]
new_nx = nm.name2_nx[new_name]
#Debug
old_nid = nm.nx2_nid[old_nx]
new_nid = nm.nx2_nid[new_nx]
logdbg('Old Name Info: cid=%d cx=%d, nid=%d, nx=%d, name=%s' % (cid, cx, old_nid, old_nx, old_name))
logdbg('New Name Info: cid=%d cx=%d, nid=%d, nx=%d, name=%s' % (cid, cx, new_nid, new_nx, new_name))
#EndDebug
nm.nx2_cx_list[old_nx].remove(cx)
nm.nx2_cx_list[new_nx].append(cx)
cm.cx2_nx[cx] = new_nx
# --- Raw Image Representation of Chip ---
def cx2_chip_list(cm, cx_list):
if np.iterable(cx_list):
return [cm.cx2_chip(cx) for cx in iter(cx_list) ]
else:
return [cm.cx2_chip(cx_list)]
def cx2_chip(cm, cx):
chip_fpath = cm.cx2_chip_fpath(cx)
# Load chip and rotate it
return np.asarray(
Image.open(chip_fpath).rotate(
cm.cx2_theta[cx]*180/np.pi, resample=Image.BICUBIC, expand=1))
def cx2_chip_size(cm, cx, rotated=False):
return cm._scaled_size(cx, rotated=rotated)
#chip_fpath = cm.cx2_chip_fpath(cx)
#return Image.open(chip_fpath).size
def cx2_T_chip2img(cm, cx, rotated=True):
'Return the transformation from Rotated Chip Space to Image Space'
#------------------------------
# Steps to transform a detection from Chip Space to Image Space
# (Chip Space): roi=[0, 0, cw, ch]
# * translate: -[cw, ch]/2
# * rotate: -theta
# * translate: [ucw, uch]/2
        # (Unoriented Chip Space) = roi=[0,0,ucw,uch]
# * scale: scale_factor
# * translate: rx, ry
# (Image Space): roi=[rx,ry,rw,rh]
#------------------------------
# rotation radians
theta = cm.cx2_theta[cx]
# roi size and translation
(rx, ry, rw, rh) = np.array(cm.cx2_roi[cx], dtype=np.float)
# unrotated size
(ucw, uch) = cm._scaled_size(cx, rotated=False, dtype=np.float)
# rotated size
(cw, ch) = cm._scaled_size(cx, rotated=True, dtype=np.float)
# Translation Variables
ctx, cty = ( cw/2, ch/2)
uctx, ucty = (ucw/2, uch/2)
sfx, sfy = rw/ucw, rh/uch
sinth = np.sin(theta)
costh = np.cos(theta)
# Translate to centered rotated
trans_center = np.array(
([ 1, 0, -ctx],
[ 0, 1, -cty],
[ 0, 0, 1]), dtype=np.float)
# unrotate
unrotate = np.array(
([costh, -sinth, 0],
[sinth, costh, 0],
[ 0, 0, 1]), dtype=np.float)
# translate to uncentered unrotated
trans_uncenter = np.array(
([ 1, 0, uctx],
[ 0, 1, ucty],
[ 0, 0, 1]), dtype=np.float)
# Unscale to untranslated image space
unscale = np.array(
([ sfx, 0, 0],
[ 0, sfy, 0],
[ 0, 0, 1]), dtype=np.float)
# Translate into image scale
trans_img = np.array(
([ 1, 0, rx],
[ 0, 1, ry],
[ 0, 0, 1]), dtype=np.float)
#return trans_center.dot(unrotate).dot(trans_uncenter).dot(unscale).dot(trans_img)
return trans_img.dot(unscale).dot(trans_uncenter).dot(unrotate).dot(trans_center)
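    # Hypothetical usage sketch: map a chip-space point (px, py) into image space by
    # applying the homogeneous 3x3 transform returned above.
    #   T = cm.cx2_T_chip2img(cx)
    #   ix, iy = T.dot(np.array([px, py, 1.0]))[0:2]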
def cx2_T_chip2unrotated(cm, cx, rotated=True):
'Return the transformation from Rotated Chip Space to Image Space'
# rotation radians
theta = cm.cx2_theta[cx]
# roi size and translation
(rx, ry, rw, rh) = np.array(cm.cx2_roi[cx],dtype=np.float)
# unrotated size
(ucw, uch) = cm._scaled_size(cx, rotated=False, dtype=np.float)
# rotated size
(cw, ch) = cm._scaled_size(cx, rotated=True, dtype=np.float)
# Translation Variables
ctx, cty = ( cw/2, ch/2)
uctx, ucty = (ucw/2, uch/2)
# Translate to centered rotated
trans_center = np.array(([ 1, 0, -ctx],
[ 0, 1, -cty],
[ 0, 0, 1]), dtype=np.float32)
# unrotate
unrotate = np.array(([np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[ 0, 0, 1]), dtype=np.float32)
# translate to uncentered unrotated
trans_uncenter = np.array(([ 1, 0, uctx],
[ 0, 1, ucty],
[ 0, 0, 1]), dtype=np.float32)
#return trans_center.dot(unrotate).dot(trans_uncenter).dot(unscale).dot(trans_img)
return trans_uncenter.dot(unrotate).dot(trans_center)
def cx2_chip_fpath(cm, cx):
'Gets chip fpath with checks'
iom = cm.hs.iom
cid = cm.cid(cx)
chip_fpath = iom.get_chip_fpath(cid)
if not os.path.exists(chip_fpath):
hotspotter.ChipFunctions.precompute_chips(cm.hs, cx_list=[cx], num_procs=1, force_recompute=True)
return chip_fpath
# --- Feature Representation Methods ---
def get_feats(cm, cx, force_recomp=False):
# FIXME: If the algorithm changes, the dirty bit is not flipped
if force_recomp or\
cm.cx2_fpts[cx] is None or\
cm.cx2_fdsc[cx] is None or\
np.sum(cm.cx2_dirty_bit[cx]):
cm.load_features(cx, force_recomp)
return (cm.cx2_fpts[cx], cm.cx2_fdsc[cx])
def get_fpts(cm, cx, force_recomp=False):
if force_recomp or cm.cx2_fpts[cx] is None or np.sum(cm.cx2_dirty_bit[cx]):
cm.load_features(cx, force_recomp)
return cm.cx2_fpts[cx]
def get_fdsc(cm, cx, force_recomp=False):
if force_recomp or cm.cx2_fdsc[cx] is None or np.sum(cm.cx2_dirty_bit[cx]):
cm.load_features(cx, force_recomp)
return cm.cx2_fdsc[cx]
def cx2_nfpts(cm, cxs=None):
        if cxs is None:
cxs = cm.all_cxs()
if type(cxs) in [np.uint32, types.IntType]:
cxs = np.array([cxs],dtype=np.uint32)
return np.array([cm.cx2_fpts[cx].shape[0] for cx in cxs], dtype=np.uint32)
# --- Internals ---
def _scaled_size(cm, cx, dtype=float, rotated=False):
        '''Returns the ChipSpace size of cx (unrotated unless rotated=True).
        Depends on the current algorithm settings.
        dtype specifies the precision of the return type'''
# Compute Unrotated Chip Space
        # Get raw size and target size
(_, _, rw, rh) = cm.cx2_roi[cx]
target_diag_pxls = cm.hs.am.algo_prefs.preproc.sqrt_num_pxls
# HACK: Double the size like Lowe; instead of normalizing
if target_diag_pxls == -1:
current_num_diag_pxls = np.sqrt(rw**2 + rh**2)
target_diag_pxls = current_num_diag_pxls*2 # max(, 5000)
ar = np.float(rw)/np.float(rh) # aspect ratio
if ar > 4 or ar < .25:
logwarn( 'Aspect ratio for cx=%d %.2f may be too extreme' % (cx, ar))
# Compute Unoriented scaled chip's width and height
ucw = np.sqrt(ar**2 * target_diag_pxls**2 / (ar**2 + 1))
uch = ucw / ar
# Rotate Unrotated Chip Space into Rotated Chip Space
if rotated:
theta = cm.cx2_theta[cx]
rot = np.array(([np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]), dtype=np.float)
            # Extent of Unrotated Chip Space. Center shifted to the origin
pts_00 = np.array([(0,0), (ucw,0), (ucw,uch), (0, uch)]) - np.array((ucw, uch))/2
rot_pts = pts_00.dot(rot)
xymin = rot_pts.min(0)
xymax = rot_pts.max(0)
# Floating point Rotated Chip w/h
cw, ch = xymax - xymin
else:
# Floating point Unrotated Chip w/h
cw, ch = ucw, uch
# Convert to the specified dtype at the end
if dtype is np.float:
return cw, ch
elif np.dtype(dtype).kind == 'f':
return dtype(cw), dtype(ch)
else:
return dtype(round(cw)), dtype(round(ch))
    # DEPRECATED
def _cut_out_roi(cm, img, roi):
logdbg('Image shape is: '+str(img.shape))
[gh, gw] = [ x-1 for x in img.shape[0:2] ]
[rx1,ry1,rw,rh] = [ max(0,x) for x in roi]
rx2 = min(gw, rx1+rw)
ry2 = min(gh, ry1+rh)
logdbg('Cutting out chip using: '+str((ry1,ry2,rx1,rx2)))
raw_chip = img[ ry1:ry2, rx1:rx2, : ]
return raw_chip
    # DEPRECATED
def cx2_raw_chip(cm, cx):
# --- Cut out the Raw Chip from Img
# TODO: Save raw chips to disk?
gm = cm.hs.gm
gx = cm.cx2_gx[cx]
roi = cm.cx2_roi[cx]
# Read Image
img = gm.gx2_img(gx)
return cm._cut_out_roi(img, roi)
# TODO: Just have a flag for each preprocessing step.
# Move this over from AlgorithmManager
    # DEPRECATED-ISH
def cx2_pil_chip(cm, cx, scaled=True, preprocessed=True, rotated=False, colored=False):
am = cm.hs.am
# Convert the raw image to PIL, and uncolor unless otherwise requested
if not colored:
pil_chip = Image.fromarray( cm.cx2_raw_chip(cx) ).convert('L')
else:
pil_chip = Image.fromarray( cm.cx2_raw_chip(cx) )
# Scale the image to its processed size
if scaled:
new_size = cm._scaled_size(cx, dtype=int, rotated=False)
pil_chip = pil_chip.resize(new_size, Image.ANTIALIAS)
if preprocessed:
pil_chip = cm.hs.am.preprocess_chip(pil_chip)
# Default do not rotate. Preprocessing is done beforehand
if rotated:
angle_degrees = cm.cx2_theta[cx]*180/np.pi
pil_chip = pil_chip.rotate(angle_degrees, resample=Image.BICUBIC, expand=1)
return pil_chip
#def compute_chip(cm, cx, showmsg=True):
#compute_chip_driver(cm.hs, cx, showmsg)
#TODO Save a raw chip and thumb
#iom = cm.hs.iom
#am = cm.hs.am
#cid = cm.cx2_cid[cx]
#chip_fpath = iom.get_chip_fpath(cid)
#chip_fname = os.path.split(chip_fpath)[1]
#if showmsg:
#logmsg(('\nComputing Chip: cid=%d fname=%s\n'+am.get_algo_name(['preproc'])) % (cid, chip_fname))
## --- Preprocess the Raw Chip
## Chip will be roated on disk np.load. Just scale for now
#chip = cm.cx2_pil_chip(cx, scaled=True, preprocessed=True,
#rotated=False, colored=False)
#logdbg('Saving Computed Chip to :'+chip_fpath)
#chip.save(chip_fpath, 'PNG')
# --- Write Chip and Thumbnail to disk
#chip_thumb_fpath = iom.get_chip_thumb_fpath(cid)
#(cw, ch) = chip.size
#thumb_size = cm.hs.dm.draw_prefs.thumbnail_size
#thumb_scale = min(thumb_size/float(cw), thumb_size/float(ch))
#(tw, th) = (int(round(cw)), int(round(ch)))
#chip_thumb = chip.resize((tw, th), Image.ANTIALIAS)
#if showmsg:
#logdbg('Saving Computed Chip Thumb to :'+chip_thumb_fpath)
#chip_thumb.save(chip_thumb_fpath, 'JPEG')
def load_features(cm, _cxs=None, force_recomp=False):
if _cxs is None:
cxs = cm.get_valid_cxs()
elif type(_cxs) is types.ListType:
cxs = np.array(_cxs)
elif type(_cxs) in [types.IntType, types.LongType, np.uint32]:
cxs = np.array([_cxs])
else:
cxs = _cxs
count_feat = 0
is_dirty = np.bitwise_or(cm.cx2_dirty_bit[cxs], force_recomp)
num_samp = cxs.size
num_dirty = np.sum(is_dirty)
# HACKS
if not np.iterable(is_dirty):
is_dirty = np.array([is_dirty])
if not np.iterable(cxs):
cxs = np.array([cxs])
load_cx = cxs[is_dirty]
num_clean = num_samp - num_dirty
#logdbg('Loading Features: Dirty=%d ; #Clean=%d' % (num_dirty, num_clean))
if num_dirty == 0:
return
logio('Loading %d Feature Reps' % num_dirty)
am = cm.hs.am
for cx in iter(load_cx):
cid = cm.cx2_cid[cx]
if cid <= 0:
logwarn('WARNING: IX='+str(cx)+' is invalid'); continue
chiprep_fpath = cm.hs.iom.get_chiprep_fpath(cid)
# Ensure that the features exists
if force_recomp or not os.path.exists(chiprep_fpath):
logio('Computing and saving features of cid='+str(cid))
hotspotter.ChipFunctions.precompute_chipreps(cm.hs, [cx], num_procs=1, force_recompute=force_recomp)
# Load the features
logdbg('Loading features in '+chiprep_fpath)
npz = np.load(chiprep_fpath)
fpts = npz['arr_0']
fdsc = npz['arr_1']
npz.close()
cm.cx2_fpts[cx] = fpts
cm.cx2_fdsc[cx] = fdsc
cm.cx2_dirty_bit[cx] = False
count_feat += len(fpts)
        logdbg('* Loaded '+str(count_feat)+' keypoints and descriptors')
return True
def unload_features(cm, cxs):
if not np.iterable(cxs):
cxs = [cxs]
nRequest = len(cxs)
nUnload = nRequest - np.sum(cm.cx2_dirty_bit[cxs])
# Print unloaded cxs unless there are more than 3
logdbg('Unloading features: %r' % cxs)
logmsg('Unloading %d/%d features: ' % (nUnload, nRequest))
cm.cx2_fpts[cxs] = np.empty(nUnload,dtype=object)
cm.cx2_fdsc[cxs] = np.empty(nUnload,dtype=object)
cm.cx2_dirty_bit[cxs] = True
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SE-ResNet (50/101/152) v1.0
# Paper: https://arxiv.org/pdf/1709.01507.pdf
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add
def stem(inputs):
""" Construct the Stem Convolutional Group
inputs : the input vector
"""
# The 224x224 images are zero padded (black - no signal) to be 230x230 images prior to the first convolution
x = ZeroPadding2D(padding=(3, 3))(inputs)
# First Convolutional layer which uses a large (coarse) filter
x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Pooled feature maps will be reduced by 75%
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
return x
def learner(x, groups, ratio):
""" Construct the Learner
x : input to the learner
groups: list of groups: number of filters and blocks
ratio : amount of filter reduction in squeeze
"""
# First Residual Block Group (not strided)
n_filters, n_blocks = groups.pop(0)
x = group(x, n_filters, n_blocks, ratio, strides=(1, 1))
# Remaining Residual Block Groups (strided)
for n_filters, n_blocks in groups:
x = group(x, n_filters, n_blocks, ratio)
return x
def group(x, n_filters, n_blocks, ratio, strides=(2, 2)):
""" Construct the Squeeze-Excite Group
x : input to the group
n_blocks : number of blocks
n_filters: number of filters
ratio : amount of filter reduction during squeeze
strides : whether projection block is strided
"""
# first block uses linear projection to match the doubling of filters between groups
x = projection_block(x, n_filters, strides=strides, ratio=ratio)
# remaining blocks use identity link
for _ in range(n_blocks-1):
x = identity_block(x, n_filters, ratio=ratio)
return x
def squeeze_excite_block(x, ratio=16):
""" Create a Squeeze and Excite block
x : input to the block
ratio : amount of filter reduction during squeeze
"""
# Remember the input
shortcut = x
# Get the number of filters on the input
filters = x.shape[-1]
# Squeeze (dimensionality reduction)
# Do global average pooling across the filters, which will output a 1D vector
x = GlobalAveragePooling2D()(x)
# Reshape into 1x1 feature maps (1x1xC)
x = Reshape((1, 1, filters))(x)
# Reduce the number of filters (1x1xC/r)
x = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(x)
# Excitation (dimensionality restoration)
# Restore the number of filters (1x1xC)
x = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(x)
# Scale - multiply the squeeze/excitation output with the input (WxHxC)
x = Multiply()([shortcut, x])
return x
def identity_block(x, n_filters, ratio=16):
""" Create a Bottleneck Residual Block with Identity Link
x : input into the block
n_filters: number of filters
ratio : amount of filter reduction during squeeze
"""
# Save input vector (feature maps) for the identity link
shortcut = x
## Construct the 1x1, 3x3, 1x1 residual block (fig 3c)
# Dimensionality reduction
x = Conv2D(n_filters, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Bottleneck layer
x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding="same", use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Dimensionality restoration - increase the number of output filters by 4X
x = Conv2D(n_filters * 4, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
# Pass the output through the squeeze and excitation block
x = squeeze_excite_block(x, ratio)
# Add the identity link (input) to the output of the residual block
x = Add()([shortcut, x])
x = ReLU()(x)
return x
def projection_block(x, n_filters, strides=(2,2), ratio=16):
""" Create Bottleneck Residual Block with Projection Shortcut
Increase the number of filters by 4X
x : input into the block
n_filters: number of filters
strides : whether entry convolution is strided (i.e., (2, 2) vs (1, 1))
ratio : amount of filter reduction during squeeze
"""
# Construct the projection shortcut
# Increase filters by 4X to match shape when added to output of block
shortcut = Conv2D(4 * n_filters, (1, 1), strides=strides, use_bias=False, kernel_initializer='he_normal')(x)
shortcut = BatchNormalization()(shortcut)
## Construct the 1x1, 3x3, 1x1 residual block (fig 3c)
# Dimensionality reduction
# Feature pooling when strides=(2, 2)
x = Conv2D(n_filters, (1, 1), strides=strides, use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Bottleneck layer
x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Dimensionality restoration - increase the number of filters by 4X
x = Conv2D(4 * n_filters, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
# Pass the output through the squeeze and excitation block
x = squeeze_excite_block(x, ratio)
# Add the projection shortcut link to the output of the residual block
x = Add()([x, shortcut])
x = ReLU()(x)
return x
def classifier(x, n_classes):
""" Create the Classifier Group
x : input to the classifier
n_classes : number of output classes
"""
# Pool at the end of all the convolutional residual blocks
x = GlobalAveragePooling2D()(x)
# Final Dense Outputting Layer for the outputs
outputs = Dense(n_classes, activation='softmax', kernel_initializer='he_normal')(x)
return outputs
# Meta-parameter: list of groups: number of filters and number of blocks per group
groups = { 50 : [ (64, 3), (128, 4), (256, 6), (512, 3) ], # SE-ResNet50
101: [ (64, 3), (128, 4), (256, 23), (512, 3) ], # SE-ResNet101
152: [ (64, 3), (128, 8), (256, 36), (512, 3) ] # SE-ResNet152
}
# Meta-parameter: Amount of filter reduction in squeeze operation
ratio = 16
# The input tensor
inputs = Input(shape=(224, 224, 3))
# The Stem Group
x = stem(inputs)
# The Learner
x = learner(x, groups[50], ratio)
# The Classifier for 1000 classes
outputs = classifier(x, 1000)
# Instantiate the Model
model = Model(inputs, outputs)
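# A minimal usage sketch (optimizer and loss choices here are illustrative, not from
# the original): compile the assembled SE-ResNet50 and report its layer structure.
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()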
|
|
from __future__ import annotations
import cmath
import numpy as np
import psutil
import ray
import scipy.special as ssp
from pymwm.utils import cylinder_utils
from pymwm.waveguide import Database, Sampling, Waveguide
from .samples import Samples, SamplesForRay, SamplesLowLoss, SamplesLowLossForRay
class Cylinder(Waveguide):
"""A class defining a cylindrical waveguide."""
def __init__(self, params):
"""Init Cylinder class.
Args:
params: A dict whose keys and values are as follows:
'core': A dict of the setting parameters of the core:
'shape': A string indicating the shape of the core.
'size': A float indicating the radius of the circular cross
section [um].
'fill': A dict of the parameters of the core Material.
'clad': A dict of the parameters of the clad Material.
'bounds': A dict indicating the bounds of database.interpolation
and its keys and values are as follows:
'wl_max': A float indicating the maximum wavelength [um]
'wl_min': A float indicating the minimum wavelength [um]
'wl_imag': A float indicating the maximum value of
abs(c / f_imag) [um] where f_imag is the imaginary part
of the frequency.
'modes': A dict of the settings for calculating modes:
'wl_max': A float indicating the maximum wavelength [um]
(default: 5.0)
'wl_min': A float indicating the minimum wavelength [um]
(default: 0.4)
'wl_imag': A float indicating the maximum value of
abs(c / f_imag) [um] where f_imag is the imaginary part
of the frequency. (default: 5.0)
'dw': A float indicating frequency interval
[rad c / 1um]=[2.99792458e14 rad / s]
(default: 1 / 64).
'num_n': An integer indicating the number of orders of
modes.
'num_m': An integer indicating the number of modes in each
order and polarization.
'ls': A list of characters chosen from "h" (horizontal
polarization) and "v" (vertical polarization).
"""
super().__init__(params)
self.u_pec, self.jnu_pec, self.jnpu_pec = self.u_jnu_jnpu_pec(
self.num_n, self.num_m
)
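    # Hypothetical construction sketch based on the parameter description above.
    # The 'fill' and 'clad' material dicts follow pymwm's Material conventions and
    # are only shown as placeholders here; the numeric values are illustrative:
    #   params = {
    #       "core": {"shape": "cylinder", "size": 0.15, "fill": {...}},
    #       "clad": {...},
    #       "bounds": {"wl_max": 5.0, "wl_min": 1.0, "wl_imag": 50.0},
    #       "modes": {"wl_max": 5.0, "wl_min": 1.0, "wl_imag": 50.0,
    #                 "num_n": 6, "num_m": 2, "ls": ["h", "v"]},
    #   }
    #   wg = Cylinder(params)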
def get_alphas(self, alpha_list: list[tuple[str, int, int]]) -> dict:
alphas: dict = {"h": [], "v": []}
for alpha in [("E", 0, m) for m in range(1, self.num_m + 1)]:
if alpha in alpha_list:
alphas["v"].append(alpha)
for alpha in [
("E", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1)
]:
if alpha in alpha_list:
alphas["h"].append(alpha)
alphas["v"].append(alpha)
for alpha in [("M", 0, m) for m in range(1, self.num_m + 1)]:
if alpha in alpha_list:
alphas["h"].append(alpha)
for alpha in [
("M", n, m) for n in range(1, self.num_n) for m in range(1, self.num_m + 1)
]:
if alpha in alpha_list:
alphas["h"].append(alpha)
alphas["v"].append(alpha)
return alphas
def betas_convs_samples(self, params: dict) -> tuple[dict, dict, Samples]:
im_factor = self.clad.im_factor
self.clad.im_factor = 1.0
self.clad_params["im_factor"] = 1.0
p_modes = params["modes"].copy()
num_n_0 = p_modes["num_n"]
num_m_0 = p_modes["num_m"]
betas: dict = {}
convs: dict = {}
success = False
catalog = Database().load_catalog()
num_n_max = catalog["num_n"].max()
num_m_max = catalog["num_m"].max()
if not np.isnan(num_n_max):
for num_n, num_m in [
(n, m)
for n in range(num_n_0, num_n_max + 1)
for m in range(num_m_0, num_m_max + 1)
]:
p_modes["num_n"] = num_n
p_modes["num_m"] = num_m
smp = Samples(self.r, self.fill_params, self.clad_params, p_modes)
try:
betas, convs = smp.database.load()
success = True
break
except IndexError:
continue
if not success:
p_modes["num_n"] = num_n_0
p_modes["num_m"] = num_m_0
betas, convs, smp = self.do_sampling(p_modes)
if im_factor != 1.0:
self.clad.im_factor = im_factor
self.clad_params["im_factor"] = im_factor
betas, convs, smp = self.do_sampling_for_im_factor(betas, convs, p_modes)
return betas, convs, smp
def do_sampling(self, p_modes: dict) -> tuple[dict, dict, Samples]:
num_n_0 = p_modes["num_n"]
num_m_0 = p_modes["num_m"]
smp = Samples(self.r, self.fill_params, self.clad_params, p_modes)
ray.shutdown()
try:
ray.init()
p_modes_id = ray.put(p_modes)
pool = ray.util.ActorPool(
SamplesForRay.remote(
self.r, self.fill_params, self.clad_params, p_modes_id
)
for _ in range(psutil.cpu_count())
)
xs_success_wr_list: list[tuple[np.ndarray, np.ndarray]] = list(
pool.map(lambda a, arg: a.wr_sampling.remote(arg), range(num_n_0))
)
num_wr = xs_success_wr_list[0][0].shape[0]
args = []
for n in range(num_n_0):
xs_array, _ = xs_success_wr_list[n]
for iwr in range(num_wr):
args.append((n, iwr, xs_array[iwr]))
xs_success_wi_list: list[tuple[np.ndarray, np.ndarray]] = list(
pool.map(lambda a, arg: a.wi_sampling.remote(arg), args)
)
num_wi = xs_success_wi_list[0][0].shape[0]
xs_success_list: list[tuple[np.ndarray, np.ndarray]] = []
for n in range(num_n_0):
xs_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=complex)
success_array = np.zeros((num_wr, num_wi, 2 * num_m_0 + 1), dtype=bool)
for iwr in range(num_wr):
i = num_wr * n + iwr
xs_i, success_i = xs_success_wi_list[i]
xs_array[iwr] = xs_i
success_array[iwr] = success_i
xs_success_list.append((xs_array, success_array))
finally:
ray.shutdown()
betas, convs = smp.betas_convs(xs_success_list)
smp.database.save(betas, convs)
return betas, convs, smp
def do_sampling_for_im_factor(
self, betas: dict, convs: dict, p_modes: dict
) -> tuple[dict, dict, SamplesLowLoss]:
smp = SamplesLowLoss(self.r, self.fill_params, self.clad_params, p_modes)
try:
betas, convs = smp.database.load()
except IndexError:
num_n = p_modes["num_n"]
num_m = p_modes["num_m"]
args = []
for iwr in range(len(smp.ws)):
for iwi in range(len(smp.wis)):
xis_list = []
for n in range(num_n):
xis = []
for i in range(num_m + 1):
xis.append(betas[("M", n, i + 1)][iwr, iwi] ** 2)
for i in range(num_m):
xis.append(betas[("E", n, i + 1)][iwr, iwi] ** 2)
xis_list.append(xis)
args.append((iwr, iwi, xis_list))
try:
ray.init()
p_modes_id = ray.put(p_modes)
pool = ray.util.ActorPool(
SamplesLowLossForRay.remote(
self.r, self.fill_params, self.clad_params, p_modes_id
)
for _ in range(psutil.cpu_count())
)
xs_success_list = list(
pool.map(lambda a, arg: a.task.remote(arg), args)
)
finally:
ray.shutdown()
betas, convs = smp.betas_convs(xs_success_list)
smp.database.save(betas, convs)
return betas, convs, smp
def beta(self, w: complex, alpha: tuple[str, int, int]) -> complex:
"""Return phase constant
Args:
w: A complex indicating the angular frequency
alpha: (pol, n, m)
pol: 'M' (TM-like mode) or 'E' (TE-like mode)
n: The order of the mode
m: The sub order of the mode.
Returns:
h: The phase constant.
"""
if self.clad.label == "PEC":
return self.beta_pec(w, alpha)
wr = w.real
wi = w.imag
hr: float = self.beta_funcs[(alpha, "real")](wr, wi)[0, 0]
hi: float = self.beta_funcs[(alpha, "imag")](wr, wi)[0, 0]
# if hr < 0:
# hr = 1e-16
# if hi < 0:
# hi = 1e-16
return hr + 1j * hi
def beta_pec(self, w: complex, alpha: tuple[str, int, int]) -> complex:
"""Return phase constant of PEC waveguide
Args:
w: A complex indicating the angular frequency
alpha: A tuple (pol, n, m) where pol is 'M' for TM mode or
'E' for TE mode, n is the order of the mode, and m is the
number of modes in the order and the polarization.
Returns:
h: A complex indicating the phase constant.
"""
w_comp = w.real + 1j * w.imag
pol, n, m = alpha
if pol == "E":
chi = ssp.jnp_zeros(n, m)[-1]
elif pol == "M":
chi = ssp.jn_zeros(n, m)[-1]
else:
            raise ValueError("pol must be 'E' or 'M'")
val = cmath.sqrt(self.fill(w_comp) * w_comp ** 2 - chi ** 2 / self.r ** 2)
if abs(val.real) > abs(val.imag):
if val.real < 0:
val *= -1
else:
if val.imag < 0:
val *= -1
return val
def coef(self, h, w, alpha):
"""Return the coefficients of TE- and TM- components which compose
the hybrid mode.
Args:
h: A complex indicating the phase constant.
w: A complex indicating the angular frequency
alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or
'E' for TE-like mode, n is the order of the mode, and m is
the number of modes in the order and the polarization.
Returns:
a: A complex indicating the coefficient of TE-component
b: A complex indicating the coefficient of TM-component
"""
e1 = self.fill(w)
e2 = self.clad(w)
pol, n, m = alpha
w = w.real + 1j * w.imag
h = h.real + 1j * h.imag
if e2.real < -1e6:
if pol == "E":
norm = self.norm(w, h, alpha, 1.0 + 0.0j, 0.0j)
ai, bi = 1.0 / norm, 0.0
else:
norm = self.norm(w, h, alpha, 0.0j, 1.0 + 0.0j)
ai, bi = 0.0, 1.0 / norm
else:
u = self.samples.u(h ** 2, w, e1)
v = self.samples.v(h ** 2, w, e2)
knv = ssp.kv(n, v)
knpv = ssp.kvp(n, v)
jnu = ssp.jv(n, u)
jnpu = ssp.jvp(n, u)
ci = -n * (u ** 2 + v ** 2) * jnu * knv / (u * v)
if pol == "E":
ci *= (h / w) ** 2
ci /= e1 * jnpu * v * knv + e2 * knpv * u * jnu
norm = self.norm(w, h, alpha, 1.0 + 0.0j, ci)
ai = 1.0 / norm
bi = ci / norm
else:
ci /= jnpu * v * knv + knpv * u * jnu
norm = self.norm(w, h, alpha, ci, 1.0 + 0.0j)
bi = 1.0 / norm
ai = ci / norm
return ai, bi
def norm(self, w, h, alpha, a, b):
pol, n, m = alpha
en = 1 if n == 0 else 2
if self.clad(w).real < -1e6:
radius = self.r
if pol == "E":
u = ssp.jnp_zeros(n, m)[-1]
jnu = ssp.jv(n, u)
jnpu = 0.0
else:
u = ssp.jn_zeros(n, m)[-1]
jnu = 0.0
jnpu = ssp.jvp(n, u)
return cmath.sqrt(
a ** 2 * np.pi * radius ** 2 / en * (1 - n ** 2 / u ** 2) * jnu ** 2
+ b ** 2 * np.pi * radius ** 2 / en * jnpu ** 2
)
u = self.samples.u(h ** 2, w, self.fill(w))
jnu = ssp.jv(n, u)
jnpu = ssp.jvp(n, u)
v = self.samples.v(h ** 2, w, self.clad(w))
knv = ssp.kv(n, v)
knpv = ssp.kvp(n, v)
val_u = 2 * np.pi * self.r ** 2 / en
val_v = val_u * ((u * jnu) / (v * knv)) ** 2
upart_diag = self.upart_diag(n, u, jnu, jnpu)
vpart_diag = self.vpart_diag(n, v, knv, knpv)
upart_off = self.upart_off(n, u, jnu)
vpart_off = self.vpart_off(n, v, knv)
return cmath.sqrt(
val_u
* (
a * (a * upart_diag + b * upart_off)
+ b * (b * upart_diag + a * upart_off)
)
- val_v
* (
a * (a * vpart_diag + b * vpart_off)
+ b * (b * vpart_diag + a * vpart_off)
)
)
@staticmethod
def upart_diag(n, u, jnu, jnpu):
return jnu * jnpu / u + (jnpu ** 2 + (1 - n ** 2 / u ** 2) * jnu ** 2) / 2
@staticmethod
def upart_off(n, u, jnu):
return n * (jnu / u) ** 2
@staticmethod
def vpart_diag(n, v, knv, knpv):
return knv * knpv / v + (knpv ** 2 - (1 + n ** 2 / v ** 2) * knv ** 2) / 2
@staticmethod
def vpart_off(n, v, knv):
return n * (knv / v) ** 2
def Y(
self,
w: complex,
h: complex,
alpha: tuple[str, int, int],
a: complex,
b: complex,
) -> complex:
"""Return the effective admittance of the waveguide mode
Args:
w: A complex indicating the angular frequency
h: A complex indicating the phase constant.
alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or
'E' for TE-like mode, n is the order of the mode, and m is
the number of modes in the order and the polarization.
a: A complex indicating the coefficient of TE-component
b: A complex indicating the coefficient of TM-component
Returns:
y: A complex indicating the effective admittance
"""
pol, n, m = alpha
e1 = self.fill(w)
e2 = self.clad(w)
en = 1 if n == 0 else 2
if e2.real < -1e6:
if pol == "E":
val = h / w
else:
val = e1 * w / h
else:
u = self.samples.u(h ** 2, w, e1)
jnu = ssp.jv(n, u)
jnpu = ssp.jvp(n, u)
v = self.samples.v(h ** 2, w, e2)
knv = ssp.kv(n, v)
knpv = ssp.kvp(n, v)
val_u = 2 * np.pi * self.r ** 2 / en
val_v = val_u * ((u * jnu) / (v * knv)) ** 2
upart_diag = self.upart_diag(n, u, jnu, jnpu)
vpart_diag = self.vpart_diag(n, v, knv, knpv)
upart_off = self.upart_off(n, u, jnu)
vpart_off = self.vpart_off(n, v, knv)
val = val_u * (
h / w * a * (a * upart_diag + b * upart_off)
+ e1 * w / h * b * (b * upart_diag + a * upart_off)
) - val_v * (
h / w * a * (a * vpart_diag + b * vpart_off)
+ e2 * w / h * b * (b * vpart_diag + a * vpart_off)
)
return val
@staticmethod
def y_te(w, h):
return h / w
def y_tm_inner(self, w, h):
e = self.fill(w)
return e * w / h
def y_tm_outer(self, w, h):
e = self.clad(w)
return e * w / h
def fields(self, x, y, w, dir, alpha, h, coef):
"""Return the electromagnetic field vectors for the specified mode and
point
Args:
x: A float indicating the x coordinate [um]
y: A float indicating the y coordinate [um]
w: A complex indicating the angular frequency
dir: "h" (horizontal polarization) or "v" (vertical polarization)
alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or
'E' for TE-like mode, n is the order of the mode, and m is
the number of modes in the order and the polarization.
h: A complex indicating the phase constant.
coef: The coefficients of TE- and TM- components
Returns:
f_vec: An array of complexes [ex, ey, ez, hx, hy, hz].
"""
pol, n, m = alpha
a, b = coef
r = np.hypot(x, y)
p = np.arctan2(y, x)
u = self.samples.u(h ** 2, w, self.fill(w))
v = self.samples.v(h ** 2, w, self.clad(w))
ur = u * r / self.r
vr = v * r / self.r
if dir == "h":
fr = np.cos(n * p)
fp = -np.sin(n * p)
else:
fr = np.sin(n * p)
fp = np.cos(n * p)
y_te = Cylinder.y_te(w, h)
if r <= self.r:
y_tm = self.y_tm_inner(w, h)
er_te = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fr
er_tm = ssp.jvp(n, ur) * fr
er = a * er_te + b * er_tm
ep_te = ssp.jvp(n, ur) * fp
ep_tm = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fp
ep = a * ep_te + b * ep_tm
ez = u / (1j * h * self.r) * b * ssp.jv(n, ur) * fr
hr = -y_te * a * ep_te - y_tm * b * ep_tm
hp = y_te * a * er_te + y_tm * b * er_tm
hz = -u / (1j * h * self.r) * y_te * a * ssp.jv(n, ur) * fp
else:
y_tm = self.y_tm_outer(w, h)
val = -u * ssp.jv(n, u) / (v * ssp.kv(n, v))
er_te = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fr * val
er_tm = ssp.kvp(n, vr) * fr * val
er = a * er_te + b * er_tm
ep_te = ssp.kvp(n, vr) * fp * val
ep_tm = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fp * val
ep = a * ep_te + b * ep_tm
ez = -v / (1j * h * self.r) * b * ssp.kv(n, vr) * fr * val
hr = -y_te * a * ep_te - y_tm * b * ep_tm
hp = y_te * a * er_te + y_tm * b * er_tm
hz = v / (1j * h * self.r) * y_te * a * ssp.kv(n, vr) * fp * val
ex = er * np.cos(p) - ep * np.sin(p)
ey = er * np.sin(p) + ep * np.cos(p)
hx = hr * np.cos(p) - hp * np.sin(p)
hy = hr * np.sin(p) + hp * np.cos(p)
return np.array([ex, ey, ez, hx, hy, hz])
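    # Hypothetical usage sketch (wg is a Cylinder instance and w a complex angular
    # frequency): evaluate the field components of one mode at a point, assuming the
    # sampling database has been prepared beforehand (e.g. via betas_convs_samples).
    #   alpha = ("E", 1, 1)
    #   h = wg.beta(w, alpha)
    #   a, b = wg.coef(h, w, alpha)
    #   ex, ey, ez, hx, hy, hz = wg.fields(0.0, 0.0, w, "h", alpha, h, (a, b))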
def e_field(self, x, y, w, dir, alpha, h, coef):
"""Return the electric field vector for the specified mode and
point
Args:
x: A float indicating the x coordinate [um]
y: A float indicating the y coordinate [um]
w: A complex indicating the angular frequency
dir: "h" (horizontal polarization) or "v" (vertical polarization)
alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or
'E' for TE-like mode, n is the order of the mode, and m is
the number of modes in the order and the polarization.
h: A complex indicating the phase constant.
coef: The coefficients of TE- and TM- components
Returns:
e_vec: An array of complexes [ex, ey, ez].
"""
pol, n, m = alpha
a, b = coef
r = np.hypot(x, y)
p = np.arctan2(y, x)
u = self.samples.u(h ** 2, w, self.fill(w))
v = self.samples.v(h ** 2, w, self.clad(w))
ur = u * r / self.r
vr = v * r / self.r
if dir == "h":
fr = np.cos(n * p)
fp = -np.sin(n * p)
else:
fr = np.sin(n * p)
fp = np.cos(n * p)
if r <= self.r:
er_te = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fr
er_tm = ssp.jvp(n, ur) * fr
er = a * er_te + b * er_tm
ep_te = ssp.jvp(n, ur) * fp
ep_tm = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fp
ep = a * ep_te + b * ep_tm
ez = u / (1j * h * self.r) * b * ssp.jv(n, ur) * fr
else:
val = -u * ssp.jv(n, u) / (v * ssp.kv(n, v))
er_te = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fr * val
er_tm = ssp.kvp(n, vr) * fr * val
er = a * er_te + b * er_tm
ep_te = ssp.kvp(n, vr) * fp * val
ep_tm = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fp * val
ep = a * ep_te + b * ep_tm
ez = -v / (1j * h * self.r) * b * ssp.kv(n, vr) * fr * val
ex = er * np.cos(p) - ep * np.sin(p)
ey = er * np.sin(p) + ep * np.cos(p)
return np.array([ex, ey, ez])
def h_field(self, x, y, w, dir, alpha, h, coef):
"""Return the magnetic field vectors for the specified mode and
point
Args:
x: A float indicating the x coordinate [um]
y: A float indicating the y coordinate [um]
w: A complex indicating the angular frequency
dir: "h" (horizontal polarization) or "v" (vertical polarization)
alpha: A tuple (pol, n, m) where pol is 'M' for TM-like mode or
'E' for TE-like mode, n is the order of the mode, and m is
the number of modes in the order and the polarization.
h: A complex indicating the phase constant.
coef: The coefficients of TE- and TM- components
Returns:
h_vec: An array of complexes [hx, hy, hz].
"""
pol, n, m = alpha
a, b = coef
r = np.hypot(x, y)
p = np.arctan2(y, x)
u = self.samples.u(h ** 2, w, self.fill(w))
v = self.samples.v(h ** 2, w, self.clad(w))
ur = u * r / self.r
vr = v * r / self.r
if dir == "h":
fr = np.cos(n * p)
fp = -np.sin(n * p)
else:
fr = np.sin(n * p)
fp = np.cos(n * p)
y_te = Cylinder.y_te(w, h)
if r <= self.r:
y_tm = self.y_tm_inner(w, h)
er_te = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fr
er_tm = ssp.jvp(n, ur) * fr
ep_te = ssp.jvp(n, ur) * fp
ep_tm = (ssp.jv(n - 1, ur) + ssp.jv(n + 1, ur)) / 2 * fp
hr = -y_te * a * ep_te - y_tm * b * ep_tm
hp = y_te * a * er_te + y_tm * b * er_tm
hz = -u / (1j * h * self.r) * y_te * a * ssp.jv(n, ur) * fp
else:
y_tm = self.y_tm_outer(w, h)
val = -u * ssp.jv(n, u) / (v * ssp.kv(n, v))
er_te = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fr * val
er_tm = ssp.kvp(n, vr) * fr * val
ep_te = ssp.kvp(n, vr) * fp * val
ep_tm = -(ssp.kv(n - 1, vr) - ssp.kv(n + 1, vr)) / 2 * fp * val
hr = -y_te * a * ep_te - y_tm * b * ep_tm
hp = y_te * a * er_te + y_tm * b * er_tm
hz = v / (1j * h * self.r) * y_te * a * ssp.kv(n, vr) * fp * val
hx = hr * np.cos(p) - hp * np.sin(p)
hy = hr * np.sin(p) + hp * np.cos(p)
return np.array([hx, hy, hz])
@staticmethod
def u_jnu_jnpu_pec(num_n, num_m):
us = np.empty((2, num_n, num_m))
jnus = np.empty((2, num_n, num_m))
jnpus = np.empty((2, num_n, num_m))
for n in range(num_n):
us[0, n] = ssp.jnp_zeros(n, num_m)
us[1, n] = ssp.jn_zeros(n, num_m)
jnus[0, n] = ssp.jv(n, us[0, n])
jnus[1, n] = np.zeros(num_m)
jnpus[0, n] = np.zeros(num_m)
jnpus[1, n] = ssp.jvp(n, us[1, n])
return us, jnus, jnpus
def coefs(self, hs, w):
As = []
Bs = []
for h, s, n, m in zip(hs, self.s_all, self.n_all, self.m_all):
pol = "E" if s == 0 else "M"
ai, bi = self.coef(h, w, (pol, n, m))
As.append(ai)
Bs.append(bi)
return np.ascontiguousarray(As), np.ascontiguousarray(Bs)
def Ys(self, w, hs, As, Bs):
vals = []
for h, s, n, a, b in zip(hs, self.s_all, self.n_all, As, Bs):
pol = "E" if s == 0 else "M"
vals.append(self.Y(w, h, (pol, n, 1), a, b))
return np.array(vals)
def props_numpy(self, w):
e1 = self.fill(w)
e2 = self.clad(w)
hs = np.array([self.beta(w, alpha) for alpha in self.alpha_all])
As, Bs = self.coefs(hs, w)
Ys = self.Ys(w, hs, As, Bs)
if e2.real < -1e6:
us = np.zeros_like(hs, dtype=complex)
jus = np.zeros_like(hs, dtype=complex)
jpus = np.zeros_like(hs, dtype=complex)
for i, (h, s, n, m) in enumerate(
zip(hs, self.s_all, self.n_all, self.m_all)
):
us[i] = self.u_pec[s, n, m - 1]
jus[i] = self.jnu_pec[s, n, m - 1]
jpus[i] = self.jnpu_pec[s, n, m - 1]
vs = (1 - 1j) * np.sqrt(0.5j * (-e2 * w ** 2 + hs ** 2)) * self.r
kvs = np.zeros_like(vs)
kpvs = np.zeros_like(vs)
else:
us = self.samples.u(hs ** 2, w, e1)
vs = self.samples.v(hs ** 2, w, e2)
jus = ssp.jv(self.n_all, us)
jpus = ssp.jvp(self.n_all, us)
kvs = ssp.kv(self.n_all, vs)
kpvs = ssp.kvp(self.n_all, vs)
return hs, us, vs, jus, jpus, kvs, kpvs, As, Bs, Ys
def props(self, w):
e1 = self.fill(w)
e2 = self.clad(w)
hs = np.array([self.beta(w, alpha) for alpha in self.alpha_all])
us, vs, jus, jpus, kvs, kpvs, As, Bs, Ys = cylinder_utils.props_cython(
w,
self.r,
self.s_all,
self.n_all,
self.m_all,
hs,
e1,
e2,
self.u_pec,
self.jnu_pec,
self.jnpu_pec,
)
return hs, us, vs, jus, jpus, kvs, kpvs, As, Bs, Ys
|
|
import sys
sys.path.append('/Users/natj/projects/arcmancer/lib/')
import pyarcmancer as pyac
from img import Imgplane
from visualize_polar import Visualize
from lineprofile import *
import units
import numpy as np
import matplotlib as mpl
from pylab import *
import os
from matplotlib import cm
import scipy.interpolate as interp
#from joblib import Parallel, delayed
#import multiprocessing
outdir = 'out/lines2/'
##################################################
# Set up figure & layout
fig = figure(figsize=(6,10))
mpl.rc('font', family='serif')
mpl.rc('xtick', labelsize='x-small')
mpl.rc('ytick', labelsize='x-small')
mpl.rcParams['image.cmap'] = 'inferno'
#num_cores = multiprocessing.cpu_count()
#print "num of cores {}", num_cores
#Setup pyarcmancer
##################################################
conf = pyac.Configuration()
conf.absolute_tolerance = 1e-12
conf.relative_tolerance = 1e-12
conf.henon_tolerance = 1e-8
conf.sampling_interval = 1e-3
conf.minimum_stepsize = 1e-10
conf.maximum_steps = 10000
conf.enforce_maximum_stepsize = False
conf.enforce_minimum_stepsize = True
conf.enforce_maximum_steps = True
conf.store_only_endpoints = True
#pyac.Log.set_console()
pyac.Log.set_file()
##################################################
# Star parameters
#R = 12.0
#M = 1.6
freq = 700.0
#incl = 15.0
#for M in [1.5, 1.1, 1.8]:
for M in [1.4]:
print "##################################################"
print "M = ", M
for R in [10.0]:
print "##################################################"
print " R = ", R
#for incl in [90, 80, 70, 60, 50, 40, 30, 20, 15, 10, 5, 1]:
#for incl in [9, 8, 7, 6, 4, 3, 2, 0.5]:
for incl in [20.0]:
print "##################################################"
print " i = ",incl
fname = 'neutronstar_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.png'.format( np.int(freq), np.int(R), M, np.int(incl))
if os.path.isfile( outdir+fname ):
continue
# Variables in units of solar mass are derived here
# and typically presented with full name
mass = M
radius = R * units.solar_mass_per_km / mass
angvel = freq * 2.0*np.pi / units.solar_mass_per_s * mass
imgscale = (mass/units.solar_mass_per_km*1.0e5)**2 #cm^2/Msun
compactness = np.sqrt(1 - 2/radius) #isotropic radius compactness
conf.absolute_tolerance = 1e-12 * radius
conf.minimum_stepsize = 1e-10 * radius
##################################################
#Define metric and surfaces of the spacetime
#S+D
metric = pyac.AGMMetric(radius, 1.0, angvel, pyac.AGMMetric.MetricType.agm_no_quadrupole)
ns_surface = pyac.AGMSurface(radius, 1.0, angvel, pyac.AGMSurface.SurfaceType.spherical)
#Oblate Sch #WORKS
#metric = pyac.AGMMetric(radius, 1.0, angvel, pyac.AGMMetric.MetricType.agm_no_quadrupole)
#ns_surface = pyac.AGMSurface(radius, 1.0, angvel, pyac.AGMSurface.SurfaceType.agm_no_quadrupole)
#Full AGM + oblate
#metric = pyac.AGMMetric(radius, 1.0, angvel, pyac.AGMMetric.MetricType.agm_standard)
#ns_surface = pyac.AGMSurface(radius, 1.0, angvel, pyac.AGMSurface.SurfaceType.agm)
surfaces = [ ns_surface ]
# Build and configure image plane by hand
img = Imgplane(conf, metric, surfaces)
img.verbose = 1
img.incl = np.deg2rad(incl) #set inclination
img.distance = 100000.0*mass #set distance
#Locate star edges
img.find_boundaries(Nedge=50, reltol=1.0e-4, max_iterations=30)
#Build internal coarse grid for the interpolation routines
img.generate_internal_grid(Nrad = 80, Nchi = 50 )
img.dissect_geos()
#Construct output xy image plane from img object
##################################################
ion()
visz = Visualize()
visz.gs.update(hspace = 0.5)
visz.compactness = compactness
visz.plot(img)
#prepare line profile axis object
visz.axs[6] = subplot( visz.gs[3,:] )
visz.axs[6].minorticks_on()
visz.axs[6].set_xlabel(r'Energy')
visz.axs[6].set_ylabel(r'Flux')
#Construct image
#visz.star(img, spot)
#visz.polar(img, spot)
visz.dissect(img)
visz.star_plot(0.0)
visz.polar_dissect(img)
visz.polar_plot(0.0)
##################################################
# Compute line profile
es, yy2 = lineprofile(visz.redshift**4, visz.redshift)
            dE = max(np.abs(es[0] - compactness), np.abs(compactness - es[-1]))
##################################################
#Save redshift into a file
fname = 'reds_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
print 'Saving to a file: '+fname
np.savetxt(outdir+fname,
visz.redshift.flatten(),
delimiter=',',
fmt = '%10.9e'
)
#Save thetas into a file
fname = 'thetas_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
print 'Saving to a file: '+fname
np.savetxt(outdir+fname,
visz.thetas.flatten(),
delimiter=',',
fmt = '%10.9e'
)
#Save phi into a file
fname = 'phis_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
print 'Saving to a file: '+fname
np.savetxt(outdir+fname,
visz.phis.flatten(),
delimiter=',',
fmt = '%10.9e'
)
#redshift limits
vmin = compactness - dE
vmax = compactness + dE
# Line profile
##################################################
#ax = subplot(gs[2,2])
#ax.set_xlim(0.8, 1.2)
visz.axs[6].plot(es, yy2, "b-")
pause(1.0)
fname = 'neutronstar_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.png'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
savefig(outdir+fname)
#save lineprofile
##################################################
#Finally save to file
fname = 'lineprofile_f{:03d}_bb_r{:02d}_m{:03.1f}_i{:02d}.csv'.format(
np.int(freq),
np.int(R),
M,
np.int(incl),
)
print 'Saving to a file: '+fname
np.savetxt(outdir+fname,
np.vstack((es, yy2)).T,
delimiter=',',
fmt = '%10.9e',
header='Energy, pdf'
)
|
|
"""Set of plots"""
# pylint: disable=invalid-name,too-many-arguments,too-few-public-methods
import datetime
import logging
import math
from abc import ABC
from typing import Optional, Sequence, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.typing as npt
from matplotlib.backends.backend_pdf import PdfPages
from metatlas.io import write_utils
from metatlas.plots import utils
logger = logging.getLogger(__name__)
def restore_matplotlib_settings(func):
    """Stop function from permanently modifying rcParams"""
    def wrapper(*args, **kwargs):
        # Take a snapshot of the settings; keeping a reference to the rcParams
        # object itself would not protect against in-place updates made by func.
        original = matplotlib.rcParams.copy()
        out = func(*args, **kwargs)
        matplotlib.rcParams.update(original)
        return out
    return wrapper
class Plot(ABC):
"""A single plot"""
def __init__(self, title: str, group_name: str):
self.title = title
self.group_name = group_name
def plot(self, ax: matplotlib.axes.Axes, back_color: utils.Color = "white") -> None:
"""
Draw plot on ax
back_color: background color for plot
"""
ax.ticklabel_format(axis="y", scilimits=[0, 0])
ax.set_facecolor(back_color)
ax.set_title(f"{self.title}\n{self.group_name}")
class PlotSet(ABC):
"""A Set of related plots"""
# pylint: disable=too-few-public-methods,too-many-locals,too-many-arguments
@restore_matplotlib_settings
def __init__(
self,
plots: Sequence[Plot],
max_plots_per_fig: int = 30,
x_min: Optional[float] = None,
x_max: Optional[float] = None,
y_min: Optional[float] = None,
y_max: Optional[float] = None,
sharey: bool = True,
font_scale: float = 2,
):
num_plots = len(plots)
num_pages = math.ceil(num_plots / max_plots_per_fig)
plots_per_page = math.ceil(num_plots / num_pages)
self.figures = []
color_generator = utils.colors()
current_group = ""
plot_idx = 0
scale_factor = font_scale / num_plots**0.5
matplotlib.rcParams.update({"font.size": 10 * scale_factor})
for _ in range(num_pages):
plots_remaining = num_plots - plot_idx
num_plots_this_page = min(plots_per_page, plots_remaining)
cur_fig, axs = utils.wrap_subplots(
num_plots_this_page, sharey=sharey, sharex=True, constrained_layout=True
)
self.figures.append(cur_fig)
for ax in axs:
if plots[plot_idx].group_name != current_group:
current_color = next(color_generator)
current_group = plots[plot_idx].group_name
plots[plot_idx].plot(ax, back_color=current_color)
plot_idx += 1
self.limit_axes(x_min, x_max, y_min, y_max)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def limit_axes(
self,
x_min: Optional[float],
x_max: Optional[float],
y_min: Optional[float],
y_max: Optional[float],
) -> None:
"""Update all plots to have the desired axes limits"""
if x_min is None and x_max is None and y_min is None and y_max is None:
return
sides = None
if x_min is not None or x_max is not None:
if y_min is None and y_max is None:
sides = "both"
elif y_min is None and y_max is not None:
sides = "min"
elif y_min is not None and y_max is None:
sides = "max"
for fig in self.figures:
for ax in fig.axes:
ax.set_xlim(left=x_min, right=x_max)
ax.set_ylim(bottom=y_min, top=y_max)
if sides is not None:
_autoscale(ax, axis="y", sides=sides)
def save_pdf(self, file_name: str, title: str, overwrite: bool = False) -> None:
"""Create a PDF file containing one figure per page"""
write_utils.check_existing_file(file_name, overwrite)
plt.ioff() # don't display the plots
matplotlib.use("agg") # mitigates a memory leak to not use backend_nbagg
with PdfPages(file_name) as pdf:
for fig in self.figures:
pdf.savefig(fig)
metadata = pdf.infodict()
metadata["Title"] = title
metadata["Author"] = "Joint Genome Institute"
metadata["CreationDate"] = datetime.datetime.today()
logger.debug("Exported PDF containing %s to %s.", title, file_name)
def close(self):
"""Close all plots and free their memory"""
for fig in self.figures:
plt.close(fig)
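# Illustrative sketch (not part of the original module): a hypothetical Plot
# subclass and driver showing the intended PlotSet workflow; the class name,
# function name and output file name below are made up for the example.
class _ExampleLinePlot(Plot):
    """Draws a single line; title and background come from the Plot base class."""
    def __init__(self, title: str, group_name: str, x, y):
        super().__init__(title, group_name)
        self.x = x
        self.y = y
    def plot(self, ax: matplotlib.axes.Axes, back_color: utils.Color = "white") -> None:
        super().plot(ax, back_color)  # sets the title, background and tick format
        ax.plot(self.x, self.y)
def _example_save_plot_set() -> None:
    plots = [_ExampleLinePlot(f"sample {i}", "group A", range(5), range(5)) for i in range(3)]
    with PlotSet(plots) as plot_set:  # __exit__ closes the figures afterwards
        plot_set.save_pdf("example_plots.pdf", title="Example plots", overwrite=True)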
# adapted from https://stackoverflow.com/questions/51323505/how-to-make-relim-and-autoscale-in-a-scatter-plot
def _get_xy(artist: matplotlib.artist.Artist) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]:
"""Gets the xy coordinates of a given artist"""
if "Collection" in str(artist):
return artist.get_offsets().T
if "Line" in str(artist):
return artist.get_xdata(), artist.get_ydata()
raise ValueError("This type of object isn't implemented yet")
# adapted from https://stackoverflow.com/questions/51323505/how-to-make-relim-and-autoscale-in-a-scatter-plot
def _autoscale(ax: matplotlib.axes.Axes, axis: str = "y", sides: str = "both", margin: float = 0.1) -> None:
"""Autoscales the x or y axis of a given matplotlib ax object
to fit the margins set by manually limits of the other axis,
with margins in fraction of the width of the plot
if sides is 'max' or 'min' then only adjust the limit on that side of axis"""
assert axis in ["x", "y"]
assert sides in ["both", "min", "max"]
low, high = np.inf, -np.inf
for artist in ax.collections + ax.lines:
if axis == "y":
set_lim = ax.set_ylim
get_lim = ax.get_ylim
cur_fixed_limit = ax.get_xlim()
fixed, dependent = _get_xy(artist)
else:
set_lim = ax.set_xlim
get_lim = ax.get_xlim
cur_fixed_limit = ax.get_ylim()
dependent, fixed = _get_xy(artist)
        low, high = _update_limits(low, high, fixed, dependent, cur_fixed_limit)
margin = margin * (high - low)
if low == np.inf and high == -np.inf:
return
assert low != np.inf and high != -np.inf
new_min = (low - margin) if sides in ["both", "min"] else get_lim()[0]
new_max = (high + margin) if sides in ["both", "max"] else get_lim()[1]
set_lim(new_min, new_max)
def _update_limits(
low: float,
high: float,
fixed: npt.NDArray[np.float64],
dependent: npt.NDArray[np.float64],
fixed_limits: Tuple[float, float],
) -> Tuple[float, float]:
"""Current limits low and high are updated to include data with ranges
in the lists fixed and dependent subject to fixed_limits
"""
local_low, local_high = _calculate_new_limit(fixed, dependent, fixed_limits)
return min(local_low, low), max(local_high, high)
# adapted from https://stackoverflow.com/questions/51323505/how-to-make-relim-and-autoscale-in-a-scatter-plot
def _calculate_new_limit(
fixed: npt.NDArray[np.float64], dependent: npt.NDArray[np.float64], fixed_limits: Tuple[float, float]
) -> Tuple[float, float]:
"""Calculates the min/max of the dependent axis given a fixed axis with limits"""
if len(fixed) > 2:
mask = (fixed > fixed_limits[0]) & (fixed < fixed_limits[1])
window = dependent[mask]
if len(window) == 0:
return np.inf, -np.inf
return window.min(), window.max()
low = dependent[0]
high = dependent[-1]
if low == 0.0 and high == 1.0:
        # This is an axhline in the autoscale direction
return np.inf, -np.inf
return low, high
|
|
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.orm import backref
from flask.ext.presst import ModelResource, fields, PolymorphicModelResource, Relationship
from tests import PresstTestCase
class TestModelResource(PresstTestCase):
def setUp(self):
super(TestModelResource, self).setUp()
app = self.app
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
self.db = db = SQLAlchemy(app)
class Tree(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(60), nullable=False)
class Fruit(db.Model):
fruit_id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(60), nullable=False)
sweetness = db.Column(db.Integer, default=5)
tree_id = db.Column(db.Integer, db.ForeignKey(Tree.id))
tree = db.relationship(Tree, backref=backref('fruits', lazy='dynamic'))
db.create_all()
self.Fruit = Fruit
self.Tree = Tree
class TreeResource(ModelResource):
class Meta:
model = Tree
fruits = Relationship('Fruit')
class FruitResource(ModelResource):
class Meta:
model = Fruit
tree = fields.ToOne(TreeResource)
self.api.add_resource(FruitResource)
self.api.add_resource(TreeResource)
self.FruitResource = FruitResource
self.TreeResource = TreeResource
def tearDown(self):
self.db.drop_all()
def test_field_discovery(self):
self.assertEqual(set(self.TreeResource._fields.keys()), {'name'})
self.assertEqual(set(self.FruitResource._fields.keys()), {'name', 'sweetness', 'tree'})
self.assertEqual(self.FruitResource.resource_name, 'fruit')
self.assertEqual(self.TreeResource.resource_name, 'tree')
def test_create(self):
self.request('POST', '/fruit', {'name': 'Apple'},
{'sweetness': 5, 'name': 'Apple', 'resource_uri': '/fruit/1', 'tree': None}, 200)
self.request('POST', '/fruit', {'name': 'Apple', 'sweetness': 9001},
{'sweetness': 9001, 'name': 'Apple', 'resource_uri': '/fruit/2', 'tree': None}, 200)
self.request('POST', '/fruit', {'sweetness': 1}, None, 400)
self.request('POST', '/tree', {'name': 'Apple'}, {'name': 'Apple', 'resource_uri': '/tree/1'}, 200)
self.request('POST', '/fruit', {'name': 'Apple', 'tree': '/tree/1'},
{'sweetness': 5, 'name': 'Apple', 'resource_uri': '/fruit/3', 'tree': '/tree/1'}, 200)
def test_get(self):
apple = lambda id: {'sweetness': 5, 'name': 'Apple', 'resource_uri': '/fruit/{}'.format(id), 'tree': None}
for i in range(1, 10):
self.request('POST', '/fruit', {'name': 'Apple'}, apple(i), 200)
self.request('GET', '/fruit', None, [apple(i) for i in range(1, i + 1)], 200)
self.request('GET', '/fruit/{}'.format(i), None, apple(i), 200)
self.request('GET', '/fruit/{}'.format(i + 1), None, None, 404)
def test_pagination(self):
apple = lambda id: {'sweetness': 5, 'name': 'Apple', 'resource_uri': '/fruit/{}'.format(id), 'tree': None}
for i in range(1, 10):
self.request('POST', '/fruit', {'name': 'Apple'}, apple(i), 200)
with self.app.test_client() as client:
response = client.get('/fruit')
self.assertEqual(response.headers['Link'],
'</fruit?page=1&per_page=20>; rel="self"')
response = client.get('/fruit?per_page=5')
self.assertEqual(set(response.headers['Link'].split(',')),
{'</fruit?page=1&per_page=5>; rel="self"',
'</fruit?page=2&per_page=5>; rel="last"',
'</fruit?page=2&per_page=5>; rel="next"'})
response = client.get('/fruit?page=1&per_page=5')
self.assertEqual(set(response.headers['Link'].split(',')),
{'</fruit?page=1&per_page=5>; rel="self"',
'</fruit?page=2&per_page=5>; rel="last"',
'</fruit?page=2&per_page=5>; rel="next"'})
response = client.get('/fruit?page=2&per_page=5')
self.assertEqual(set(response.headers['Link'].split(',')),
{'</fruit?page=2&per_page=5>; rel="self"',
'</fruit?page=1&per_page=5>; rel="first"',
'</fruit?page=1&per_page=5>; rel="prev"'})
response = client.get('/fruit?page=2&per_page=2')
self.assertEqual(set(response.headers['Link'].split(',')),
{'</fruit?page=3&per_page=2>; rel="next"',
'</fruit?page=5&per_page=2>; rel="last"',
'</fruit?page=1&per_page=2>; rel="prev"',
'</fruit?page=1&per_page=2>; rel="first"',
'</fruit?page=2&per_page=2>; rel="self"'})
def test_update(self):
self.request('POST', '/fruit', {'name': 'Apple'},
{'sweetness': 5, 'name': 'Apple', 'resource_uri': '/fruit/1', 'tree': None}, 200)
# TODO implement support for ColumnDefault
# FIXME defaults with update
# self.request('POST', '/fruit/1', {'name': 'Golden Apple'},
# {'sweetness': 5, 'name': 'Golden Apple', 'resource_uri': '/fruit/1', 'tree': None}, 200)
self.request('POST', '/fruit/1', {'name': 'Golden Apple', 'sweetness': 0},
{'sweetness': 0, 'name': 'Golden Apple', 'resource_uri': '/fruit/1', 'tree': None}, 200)
self.request('POST', '/fruit/1', {}, None, 400)
def test_patch(self):
self.request('POST', '/tree', {'name': 'Apple tree'}, {'name': 'Apple tree', 'resource_uri': '/tree/1'}, 200)
expected_apple = {'sweetness': 5, 'name': 'Apple', 'resource_uri': '/fruit/1', 'tree': None}
self.request('POST', '/fruit', {'name': 'Apple'}, expected_apple, 200)
changes = [
{'name': 'Golden Apple'},
{'name': 'Golden Apple'},
{'tree': '/tree/1'},
{'sweetness': 3},
{},
{'name': 'Golden Apple', 'tree': None},
]
for change in changes:
expected_apple.update(change)
self.request('PATCH', '/fruit/1', change, expected_apple, 200)
def test_delete(self):
self.request('POST', '/tree', {'name': 'Apple tree'}, {'name': 'Apple tree', 'resource_uri': '/tree/1'}, 200)
self.request('DELETE', '/tree/1', {'name': 'Apple tree'}, None, 204)
self.request('DELETE', '/tree/1', {'name': 'Apple tree'}, None, 404)
self.request('DELETE', '/tree/2', {'name': 'Apple tree'}, None, 404)
def test_no_model(self):
class OopsResource(ModelResource):
class Meta:
pass
self.api.add_resource(OopsResource)
def test_relationship_post(self):
self.request('POST', '/tree', {'name': 'Apple tree'}, {'name': 'Apple tree', 'resource_uri': '/tree/1'}, 200)
self.request('GET', '/tree/1/fruits', None, [], 200)
self.request('POST', '/fruit', {'name': 'Apple'},
{'name': 'Apple', 'resource_uri': '/fruit/1', 'sweetness': 5, 'tree': None}, 200)
self.request('POST', '/tree/1/fruits', '/fruit/1',
{'name': 'Apple', 'resource_uri': '/fruit/1', 'sweetness': 5, 'tree': '/tree/1'}, 200)
def test_relationship_get(self):
self.test_relationship_post()
self.request('GET', '/tree/1/fruits', None,
[{'name': 'Apple', 'resource_uri': '/fruit/1', 'sweetness': 5, 'tree': '/tree/1'}], 200)
def test_relationship_delete(self):
self.test_relationship_post()
self.request('DELETE', '/tree/1/fruits', '/fruit/1', None, 204)
#self.request('GET', '/apple/seed_count', None, 2, 200)
def test_get_schema(self):
self.api.enable_schema()
self.request('GET', '/', None, {
"$schema": "http://json-schema.org/hyper-schema#",
"definitions": {
"fruit": {
"definitions": {
"name": {
"type": "string"
},
"resource_uri": {
"format": "uri",
"readOnly": True,
"type": "string"
},
"sweetness": {
"type": "integer"
}
},
"links": [
{
"href": "/fruit/{id}",
"rel": "self"
}
],
"properties": {
"name": {
"$ref": "#/definitions/fruit/definitions/name"
},
"resource_uri": {
"$ref": "#/definitions/fruit/definitions/resource_uri"
},
"sweetness": {
"$ref": "#/definitions/fruit/definitions/sweetness"
},
"tree": {
"$ref": "#/definitions/tree/definitions/resource_uri"
}
},
"required": [
"name"
]
},
"tree": {
"definitions": {
"name": {
"type": "string"
},
"resource_uri": {
"format": "uri",
"readOnly": True,
"type": "string"
}
},
"links": [
{
"href": "/tree/{id}/fruits",
"rel": "fruits",
"targetSchema": {
"$ref": "#/definitions/fruit"
}
},
{
"href": "/tree/{id}",
"rel": "self"
}
],
"properties": {
"name": {
"$ref": "#/definitions/tree/definitions/name"
},
"resource_uri": {
"$ref": "#/definitions/tree/definitions/resource_uri"
}
},
"required": [
"name"
]
}
},
'properties': {
'tree': {'$ref': '#/definitions/tree'},
'fruit': {'$ref': '#/definitions/fruit'}
}
}, 200)
class TestPolymorphicModelResource(PresstTestCase):
def setUp(self):
super(TestPolymorphicModelResource, self).setUp()
app = self.app
app.config['SQLALCHEMY_ENGINE'] = 'sqlite://'
app.config['TESTING'] = True
self.db = db = SQLAlchemy(app)
class Fruit(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(60), nullable=False)
table = db.Column(db.String(60))
color = db.Column(db.String)
__mapper_args__ = {
'polymorphic_identity': 'fruit',
'polymorphic_on': table
}
class CitrusFruit(Fruit):
id = db.Column(db.Integer, db.ForeignKey(Fruit.id), primary_key=True)
sweetness = db.Column(db.Integer)
__mapper_args__ = {
'polymorphic_identity': 'citrus',
}
class FruitResource(PolymorphicModelResource):
class Meta:
model = Fruit
exclude_fields = ['table']
class CitrusFruitResource(ModelResource):
class Meta:
model = CitrusFruit
resource_name = 'citrus'
exclude_fields = ['table']
db.create_all()
self.CitrusFruit = CitrusFruit
self.api.add_resource(FruitResource)
self.api.add_resource(CitrusFruitResource)
def test_polymorphic(self):
self.request('POST', '/fruit', {'name': 'Banana', 'color': 'yellow'},
{'name': 'Banana', 'color': 'yellow', 'resource_uri': '/fruit/1'}, 200)
self.request('POST', '/citrus', {'name': 'Lemon', 'color': 'yellow'},
{'name': 'Lemon', 'sweetness': 0, 'color': 'yellow', 'resource_uri': '/citrus/2'}, 200)
self.request('GET', '/fruit', None, [
{'color': 'yellow', 'name': 'Banana', 'resource_uri': '/fruit/1'},
{'citrus': {'color': 'yellow',
'name': 'Lemon',
'resource_uri': '/citrus/2',
'sweetness': 0},
'color': 'yellow',
'name': 'Lemon',
'resource_uri': '/fruit/2'}
], 200)
def test_exclude_polymorphic(self):
class CitrusFruitAltResource(ModelResource):
class Meta:
model = self.CitrusFruit
exclude_polymorphic = True
resource_name = 'citrus_alt'
exclude_fields = ['table']
self.api.add_resource(CitrusFruitAltResource)
self.request('POST', '/citrus', {'name': 'Lemon', 'sweetness': 1},
{'name': 'Lemon', 'sweetness': 1, 'color': None, 'resource_uri': '/citrus/1'}, 200)
self.request('GET', '/citrus_alt/1', None,
{'sweetness': 1, 'resource_uri': '/citrus_alt/1'}, 200)
|
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The spm module provides basic functions for interfacing with SPM tools."""
from nipype.interfaces.traits import Directory
__docformat__ = 'restructuredtext'
# Standard library imports
import os
from copy import deepcopy
import re
# Third-party imports
import numpy as np
from scipy.io import savemat
# Local imports
from nipype.interfaces.base import BaseInterface, traits, TraitedSpec,\
InputMultiPath
from nipype.utils.misc import isdefined
from nipype.externals.pynifti import load
from nipype.interfaces.matlab import MatlabCommand
import nipype.utils.spm_docs as sd
import logging
logger = logging.getLogger('iflogger')
def func_is_3d(in_file):
"""Checks if input functional files are 3d."""
if isinstance(in_file, list):
return func_is_3d(in_file[0])
else:
img = load(in_file)
shape = img.get_shape()
if len(shape) == 3 or (len(shape)==4 and shape[3]==1):
return True
else:
return False
def get_first_3dfile(in_files):
if not func_is_3d(in_files):
return None
if isinstance(in_files[0], list):
return in_files[0]
return in_files
def scans_for_fname(fname):
"""Reads a nifti file and converts it to a numpy array storing
individual nifti volumes.
Opens images so will fail if they are not found.
"""
if isinstance(fname,list):
scans = np.zeros((len(fname),),dtype=object)
for sno,f in enumerate(fname):
scans[sno] = '%s,1'%f
return scans
img = load(fname)
if len(img.get_shape()) == 3:
return np.array(('%s,1'%fname,),dtype=object)
else:
n_scans = img.get_shape()[3]
scans = np.zeros((n_scans,),dtype=object)
for sno in range(n_scans):
scans[sno] = '%s,%d'% (fname, sno+1)
return scans
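# Illustrative sketch (not part of nipype): for a list input no image is opened,
# so the SPM 'filename,volume' formatting can be shown directly; the helper name
# and the paths are made up for the example.
def _example_scans_for_fname_list():
    scans = scans_for_fname(['/data/run1.nii', '/data/run2.nii'])
    # scans is a numpy object array: ['/data/run1.nii,1', '/data/run2.nii,1']
    return scans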
def scans_for_fnames(fnames,keep4d=False,separate_sessions=False):
"""Converts a list of files to a concatenated numpy array for each
volume.
keep4d : boolean
keeps the entries of the numpy array as 4d files instead of
extracting the individual volumes.
separate_sessions: boolean
if 4d nifti files are being used, then separate_sessions
ensures a cell array per session is created in the structure.
"""
flist = None
if not isinstance(fnames[0], list):
if func_is_3d(fnames[0]):
fnames = [fnames]
if separate_sessions or keep4d:
flist = np.zeros((len(fnames),),dtype=object)
for i,f in enumerate(fnames):
if separate_sessions:
if keep4d:
if isinstance(f,list):
flist[i] = np.array(f, dtype=object)
else:
flist[i] = np.array([f],dtype=object)
else:
flist[i] = scans_for_fname(f)
else:
if keep4d:
flist[i] = f
else:
scans = scans_for_fname(f)
if flist is None:
flist = scans
else:
flist = np.concatenate((flist,scans))
return flist
class Info(object):
"""Handles SPM version information
"""
@staticmethod
def version( matlab_cmd = None ):
"""Returns the path to the SPM directory in the Matlab path
If path not found, returns None.
Parameters
----------
matlab_cmd : String specifying default matlab command
default None, will look for environment variable MATLABCMD
and use if found, otherwise falls back on MatlabCommand
default of 'matlab -nodesktop -nosplash'
Returns
-------
spm_path : string representing path to SPM directory
                   returns None if the path is not found
"""
        if matlab_cmd is None:
            # Fall back to the MATLABCMD environment variable, then the default.
            matlab_cmd = os.environ.get('MATLABCMD',
                                        'matlab -nodesktop -nosplash')
mlab = MatlabCommand(matlab_cmd = matlab_cmd)
mlab.inputs.script_file = 'spminfo'
mlab.inputs.script = """
if isempty(which('spm')),
throw(MException('SPMCheck:NotFound','SPM not in matlab path'));
end;
spm_path = spm('dir');
fprintf(1, 'NIPYPE %s', spm_path);
"""
out = mlab.run()
if out.runtime.returncode == 0:
spm_path = sd._strip_header(out.runtime.stdout)
else:
logger.debug(out.runtime.stderr)
return None
return spm_path
def no_spm():
    """ Checks if SPM is NOT installed
    used with nosetests skipif to skip tests
    that will fail if spm is not installed"""
    return Info.version() is None
class SPMCommandInputSpec(TraitedSpec):
matlab_cmd = traits.Str()
paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath')
mfile = traits.Bool(True, desc='Run m-code using m-file',
usedefault=True)
class SPMCommand(BaseInterface):
"""Extends `BaseInterface` class to implement SPM specific interfaces.
WARNING: Pseudo prototype class, meant to be subclassed
"""
input_spec = SPMCommandInputSpec
_jobtype = 'basetype'
_jobname = 'basename'
def __init__(self, **inputs):
super(SPMCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._matlab_cmd_update, 'matlab_cmd')
self._matlab_cmd_update()
def _matlab_cmd_update(self):
# MatlabCommand has to be created here,
        # because matlab_cmd is not a proper input
# and can be set only during init
self.mlab = MatlabCommand(matlab_cmd=self.inputs.matlab_cmd,
mfile=self.inputs.mfile,
paths=self.inputs.paths)
self.mlab.inputs.script_file = 'pyscript_%s.m' % \
self.__class__.__name__.split('.')[-1].lower()
@property
def jobtype(self):
return self._jobtype
@property
def jobname(self):
return self._jobname
def use_mfile(self, use_mfile):
"""boolean,
if true generates a matlab <filename>.m file
if false generates a binary .mat file
"""
self.inputs.mfile = use_mfile
def _run_interface(self, runtime):
"""Executes the SPM function using MATLAB."""
if isdefined(self.inputs.mfile):
self.mlab.inputs.mfile = self.inputs.mfile
if isdefined(self.inputs.paths):
self.mlab.inputs.paths = self.inputs.paths
self.mlab.inputs.script = self._make_matlab_command(deepcopy(self._parse_inputs()))
results = self.mlab.run()
runtime.returncode = results.runtime.returncode
runtime.stdout = results.runtime.stdout
runtime.stderr = results.runtime.stderr
return runtime
def _list_outputs(self):
"""Determine the expected outputs based on inputs."""
raise NotImplementedError
def _format_arg(self, opt, spec, val):
"""Convert input to appropriate format for SPM."""
return val
def _parse_inputs(self, skip=()):
spmdict = {}
metadata=dict(field=lambda t : t is not None)
for name, spec in self.inputs.traits(**metadata).items():
if skip and name in skip:
continue
value = getattr(self.inputs, name)
if not isdefined(value):
continue
field = spec.field
if '.' in field:
fields = field.split('.')
dictref = spmdict
for f in fields[:-1]:
if f not in dictref.keys():
dictref[f] = {}
dictref = dictref[f]
dictref[fields[-1]] = self._format_arg(name, spec, value)
else:
spmdict[field] = self._format_arg(name, spec, value)
return [spmdict]
def _reformat_dict_for_savemat(self, contents):
"""Encloses a dict representation within hierarchical lists.
        In order to create an appropriate SPM job structure, a Python
        dict storing the job needs to be modified so that each dict
        embedded within it is enclosed as an element of a list.
Examples
--------
>>> a = SPMCommand()._reformat_dict_for_savemat(dict(a=1,b=dict(c=2,d=3)))
>>> print a
[{'a': 1, 'b': [{'c': 2, 'd': 3}]}]
"""
newdict = {}
try:
for key, value in contents.items():
if isinstance(value, dict):
if value:
newdict[key] = self._reformat_dict_for_savemat(value)
# if value is None, skip
else:
newdict[key] = value
return [newdict]
except TypeError:
print 'Requires dict input'
def _generate_job(self, prefix='', contents=None):
"""Recursive function to generate spm job specification as a string
Parameters
----------
prefix : string
            A string that is prepended to the generated matlab variable names.
contents : dict
A non-tuple Python structure containing spm job
information gets converted to an appropriate sequence of
matlab commands.
"""
jobstring = ''
if contents is None:
return jobstring
if isinstance(contents, list):
for i,value in enumerate(contents):
newprefix = "%s(%d)" % (prefix, i+1)
jobstring += self._generate_job(newprefix, value)
return jobstring
if isinstance(contents, dict):
for key,value in contents.items():
newprefix = "%s.%s" % (prefix, key)
jobstring += self._generate_job(newprefix, value)
return jobstring
if isinstance(contents, np.ndarray):
if contents.dtype == np.dtype(object):
if prefix:
jobstring += "%s = {...\n"%(prefix)
else:
jobstring += "{...\n"
for i,val in enumerate(contents):
if isinstance(val, np.ndarray):
jobstring += self._generate_job(prefix=None,
contents=val)
elif isinstance(val,str):
jobstring += '\'%s\';...\n'%(val)
else:
jobstring += '%s;...\n'%str(val)
jobstring += '};\n'
else:
for i,val in enumerate(contents):
for field in val.dtype.fields:
if prefix:
newprefix = "%s(%d).%s"%(prefix, i+1, field)
else:
newprefix = "(%d).%s"%(i+1, field)
jobstring += self._generate_job(newprefix,
val[field])
return jobstring
if isinstance(contents, str):
jobstring += "%s = '%s';\n" % (prefix,contents)
return jobstring
jobstring += "%s = %s;\n" % (prefix,str(contents))
return jobstring
def _make_matlab_command(self, contents, postscript=None):
"""Generates a mfile to build job structure
Parameters
----------
contents : list
a list of dicts generated by _parse_inputs
in each subclass
cwd : string
default os.getcwd()
Returns
-------
mscript : string
contents of a script called by matlab
"""
cwd = os.getcwd()
mscript = """
%% Generated by nipype.interfaces.spm
if isempty(which('spm')),
throw(MException('SPMCheck:NotFound','SPM not in matlab path'));
end
fprintf('SPM version: %s\\n',spm('ver'));
fprintf('SPM path: %s\\n',which('spm'));
spm('Defaults','fMRI');
if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');end\n
"""
if self.mlab.inputs.mfile:
if self.jobname in ['st','smooth','preproc','preproc8','fmri_spec','fmri_est',
'factorial_design'] :
# parentheses
mscript += self._generate_job('jobs{1}.%s{1}.%s(1)' %
(self.jobtype,self.jobname), contents[0])
else:
#curly brackets
mscript += self._generate_job('jobs{1}.%s{1}.%s{1}' %
(self.jobtype,self.jobname), contents[0])
else:
            jobdef = {'jobs': [{self.jobtype: [{self.jobname:
                self._reformat_dict_for_savemat(contents[0])}]}]}
savemat(os.path.join(cwd,'pyjobs_%s.mat'%self.jobname), jobdef)
mscript += "load pyjobs_%s;\n\n" % self.jobname
mscript += """
if strcmp(spm('ver'),'SPM8'),
jobs=spm_jobman('spm5tospm8',{jobs});
end
spm_jobman(\'run\',jobs);\n
"""
if postscript is not None:
mscript += postscript
return mscript
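# Illustrative sketch (not part of nipype): what SPMCommand._generate_job emits
# for a small nested dict, where every leaf becomes one matlab assignment; the
# helper name, prefix and field values are made up for the example.
def _example_generate_job():
    contents = {'timing': {'units': 'secs', 'RT': 2.0}}
    script = SPMCommand()._generate_job('jobs{1}.stats{1}.fmri_spec', contents)
    # script contains (dict iteration order may vary):
    #   jobs{1}.stats{1}.fmri_spec.timing.units = 'secs';
    #   jobs{1}.stats{1}.fmri_spec.timing.RT = 2.0;
    return script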
|
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Dependencies
import numpy as np
# Project
from astropy import units as u
from astropy.utils import OrderedDescriptor, ShapedLikeNDArray
__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute',
'EarthLocationAttribute', 'CoordinateAttribute',
'CartesianRepresentationAttribute',
'DifferentialAttribute']
class Attribute(OrderedDescriptor):
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeAttribute`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
_class_attribute_ = 'frame_attributes'
_name_attribute_ = 'name'
name = '<unbound>'
def __init__(self, default=None, secondary_attribute=''):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, '_' + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
instance_shape = getattr(instance, 'shape', None)
if instance_shape is not None and (getattr(out, 'size', 1) > 1 and
out.shape != instance_shape):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(np.broadcast_to, shape=instance_shape,
subok=True)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
"attribute {} should be scalar or have shape {}, "
"but is has shape {} and could not be broadcast."
.format(self.name, instance_shape, out.shape))
converted = True
if converted:
setattr(instance, '_' + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError('Cannot set frame attribute')
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Convert input value to a Time object and validate by running through
        the Time constructor. Array-valued results are marked as read-only.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(
'Invalid time input {}={!r}\n{}'.format(self.name,
value, err))
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
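# Illustrative sketch (not part of astropy): TimeAttribute.convert_input parses
# anything the Time constructor accepts and reports whether a conversion was
# needed; the helper name is made up for the example.
def _example_time_attribute():
    attr = TimeAttribute(default=None)
    out, converted = attr.convert_input('2010-01-01T00:00:00')
    # out is an astropy Time instance and converted is True, because the ISO
    # string had to be parsed rather than passed through unchanged.
    return out, converted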
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute='', unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (isinstance(value, list) and len(value) == 3 and
all(v == 0 for v in value) and self.unit is not None):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, 'xyz') and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, 'xyz', value)
if not hasattr(value, 'unit'):
raise TypeError('tried to set a {} with something that does '
'not have a unit.'
.format(self.__class__.__name__))
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Can be `None`, which should be used for special cases in associated
frame transformations like "this quantity should be ignored" or similar.
Parameters
----------
default : value or Quantity or None
Default value for the attribute if the user does not supply one. If a
Quantity, it must be consistent with ``unit``, or if a value, ``unit``
cannot be None.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None
If given, specifies the shape the attribute must be
"""
def __init__(self, default=None, secondary_attribute='', unit=None,
shape=None):
if default is None and unit is None:
raise ValueError('Either a default quantity value must be '
'provided, or a unit must be provided to define a '
'QuantityAttribute.')
if default is not None and unit is None:
unit = default.unit
self.unit = unit
self.shape = shape
default = self.convert_input(default)[0]
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if np.all(value == 0) and self.unit is not None:
return u.Quantity(np.zeros(self.shape), self.unit), True
else:
if not hasattr(value, 'unit') and self.unit != u.dimensionless_unscaled:
raise TypeError('Tried to set a QuantityAttribute with '
'something that does not have a unit.')
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
raise ValueError('The provided value has shape "{}", but '
'should have shape "{}"'.format(value.shape,
self.shape))
converted = oldvalue is not value
return value, converted
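# Illustrative sketch (not part of astropy): QuantityAttribute.convert_input
# coerces values into a Quantity with the declared unit and reports whether a
# conversion took place; the helper name is made up for the example.
def _example_quantity_attribute():
    attr = QuantityAttribute(default=1 * u.km)
    value, converted = attr.convert_input(2500 * u.m)
    # value is 2.5 km (a Quantity in the attribute's unit) and converted is True,
    # because the input had to be rescaled from metres to kilometres.
    return value, converted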
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Checks that the input is an EarthLocation, or something that can be
        transformed to the ITRS frame and converted into one.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, 'transform_to'):
raise ValueError('"{}" was passed into an '
'EarthLocationAttribute, but it does not have '
'"transform_to" method'.format(value))
itrsobj = value.transform_to(ITRS)
return itrsobj.earth_location, True
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
low-level frame class *or* a `~astropy.coordinates.SkyCoord`, but will
always be converted to the low-level frame class when accessed.
Parameters
----------
frame : a coordinate frame class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, frame, default=None, secondary_attribute=''):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a SkyCoord with the necessary units (or the
special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, self._frame):
return value, False
else:
if not hasattr(value, 'transform_to'):
raise ValueError('"{}" was passed into a '
'CoordinateAttribute, but it does not have '
'"transform_to" method'.format(value))
transformedobj = value.transform_to(self._frame)
if hasattr(transformedobj, 'frame'):
transformedobj = transformedobj.frame
return transformedobj, True
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, default=None, allowed_classes=None,
secondary_attribute=''):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
            self.allowed_classes = (BaseDifferential,)
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if not isinstance(value, self.allowed_classes):
if len(self.allowed_classes) == 1:
value = self.allowed_classes[0](value)
else:
raise TypeError('Tried to set a DifferentialAttribute with '
'an unsupported Differential type {}. Allowed '
'classes are: {}'
.format(value.__class__,
self.allowed_classes))
return value, True
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation
from .representation import CartesianRepresentation, BaseDifferential
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime
import operator
import nose
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.core.common as com
import pandas.formats.printing as printing
import pandas as pd
from pandas.util.testing import (assert_numpy_array_equal,
assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import (TestData, _check_mixed_float,
_check_mixed_int)
class TestDataFrameOperators(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_operators(self):
garbage = random.random(4)
colSeries = Series(garbage, index=np.array(self.frame.columns))
idSum = self.frame + self.frame
seriesSum = self.frame + colSeries
for col, series in compat.iteritems(idSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] * 2
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
self.assertTrue(np.isnan(origVal))
for col, series in compat.iteritems(seriesSum):
for idx, val in compat.iteritems(series):
origVal = self.frame[col][idx] + colSeries[col]
if not np.isnan(val):
self.assertEqual(val, origVal)
else:
self.assertTrue(np.isnan(origVal))
added = self.frame2 + self.frame2
expected = self.frame2 * 2
assert_frame_equal(added, expected)
df = DataFrame({'a': ['a', None, 'b']})
assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))
# Test for issue #10181
for dtype in ('float', 'int64'):
frames = [
DataFrame(dtype=dtype),
DataFrame(columns=['A'], dtype=dtype),
DataFrame(index=[0], dtype=dtype),
]
for df in frames:
self.assertTrue((df + df).equals(df))
assert_frame_equal(df + df, df)
def test_ops_np_scalar(self):
vals, xs = np.random.rand(5, 3), [nan, 7, -23, 2.718, -3.14, np.inf]
f = lambda x: DataFrame(x, index=list('ABCDE'),
columns=['jim', 'joe', 'jolie'])
df = f(vals)
for x in xs:
assert_frame_equal(df / np.array(x), f(vals / x))
assert_frame_equal(np.array(x) * df, f(vals * x))
assert_frame_equal(df + np.array(x), f(vals + x))
assert_frame_equal(np.array(x) - df, f(x - vals))
def test_operators_boolean(self):
# GH 5808
# empty frames, non-mixed dtype
result = DataFrame(index=[1]) & DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) | DataFrame(index=[1])
assert_frame_equal(result, DataFrame(index=[1]))
result = DataFrame(index=[1]) & DataFrame(index=[1, 2])
assert_frame_equal(result, DataFrame(index=[1, 2]))
result = DataFrame(index=[1], columns=['A']) & DataFrame(
index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) & DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
result = DataFrame(True, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(True, index=[1], columns=['A']))
# boolean ops
result = DataFrame(1, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
assert_frame_equal(result, DataFrame(1, index=[1], columns=['A']))
def f():
DataFrame(1.0, index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
self.assertRaises(TypeError, f)
def f():
DataFrame('foo', index=[1], columns=['A']) | DataFrame(
True, index=[1], columns=['A'])
self.assertRaises(TypeError, f)
def test_operators_none_as_na(self):
df = DataFrame({"col1": [2, 5.0, 123, None],
"col2": [1, 2, 3, 4]}, dtype=object)
ops = [operator.add, operator.sub, operator.mul, operator.truediv]
# since filling converts dtypes from object, changed expected to be
# object
for op in ops:
filled = df.fillna(np.nan)
result = op(df, 3)
expected = op(filled, 3).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df)
expected = op(filled, filled).astype(object)
expected[com.isnull(expected)] = None
assert_frame_equal(result, expected)
result = op(df, df.fillna(7))
assert_frame_equal(result, expected)
result = op(df.fillna(7), df)
assert_frame_equal(result, expected, check_dtype=False)
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
# GH4968
# invalid date/int comparisons
df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])
df['dates'] = date_range('20010101', periods=len(df))
df2 = df.copy()
df2['dates'] = df['a']
check(df, df2)
df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])
df2 = DataFrame({'a': date_range('20010101', periods=len(
df)), 'b': date_range('20100101', periods=len(df))})
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH4982
df = DataFrame({'dates1': date_range('20010101', periods=10),
'dates2': date_range('20010102', periods=10),
'intcol': np.random.randint(1000000000, size=10),
'floatcol': np.random.randn(10),
'stringcol': list(tm.rands(10))})
df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT
ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
expected = left_f(df, Timestamp('20010109'))
result = right_f(Timestamp('20010109'), df)
assert_frame_equal(result, expected)
# nats
expected = left_f(df, Timestamp('nat'))
result = right_f(Timestamp('nat'), df)
assert_frame_equal(result, expected)
def test_modulo(self):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
# this is technically wrong as the integer portion is coerced to float
# ###
expected = DataFrame({'first': Series([0, 0, 0, 0], dtype='float64'),
'second': Series([np.nan, np.nan, np.nan, 0])})
result = p % p
assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = p.values % p.values
result2 = DataFrame(arr, index=p.index,
columns=p.columns, dtype='float64')
result2.iloc[0:3, 1] = np.nan
assert_frame_equal(result2, expected)
result = p % 0
expected = DataFrame(np.nan, index=p.index, columns=p.columns)
assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = p.values.astype('float64') % 0
result2 = DataFrame(arr, index=p.index, columns=p.columns)
assert_frame_equal(result2, expected)
# not commutative with series
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s % p
res2 = p % s
self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_div(self):
# integer div, but deal with the 0's (GH 9144)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p / p
expected = DataFrame({'first': Series([1.0, 1.0, 1.0, 1.0]),
'second': Series([nan, nan, nan, 1])})
assert_frame_equal(result, expected)
with np.errstate(all='ignore'):
arr = p.values.astype('float') / p.values
result2 = DataFrame(arr, index=p.index,
columns=p.columns)
assert_frame_equal(result2, expected)
result = p / 0
expected = DataFrame(np.inf, index=p.index, columns=p.columns)
expected.iloc[0:3, 1] = nan
assert_frame_equal(result, expected)
        # numpy has a slightly different (wrong) treatment
with np.errstate(all='ignore'):
arr = p.values.astype('float64') / 0
result2 = DataFrame(arr, index=p.index,
columns=p.columns)
assert_frame_equal(result2, expected)
p = DataFrame(np.random.randn(10, 5))
s = p[0]
res = s / p
res2 = p / s
self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))
def test_logical_operators(self):
def _check_bin_op(op):
result = op(df1, df2)
expected = DataFrame(op(df1.values, df2.values), index=df1.index,
columns=df1.columns)
self.assertEqual(result.values.dtype, np.bool_)
assert_frame_equal(result, expected)
def _check_unary_op(op):
result = op(df1)
expected = DataFrame(op(df1.values), index=df1.index,
columns=df1.columns)
self.assertEqual(result.values.dtype, np.bool_)
assert_frame_equal(result, expected)
df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': False, 'b': False, 'c': True,
'd': False, 'e': False},
'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},
'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}
df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'b': {'a': False, 'b': True, 'c': False,
'd': False, 'e': False},
'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},
'd': {'a': False, 'b': False, 'c': False,
'd': True, 'e': False},
'e': {'a': False, 'b': False, 'c': False,
'd': False, 'e': True}}
df1 = DataFrame(df1)
df2 = DataFrame(df2)
_check_bin_op(operator.and_)
_check_bin_op(operator.or_)
_check_bin_op(operator.xor)
# operator.neg is deprecated in numpy >= 1.9
_check_unary_op(operator.inv)
def test_logical_typeerror(self):
if not compat.PY3:
self.assertRaises(TypeError, self.frame.__eq__, 'foo')
self.assertRaises(TypeError, self.frame.__lt__, 'foo')
self.assertRaises(TypeError, self.frame.__gt__, 'foo')
self.assertRaises(TypeError, self.frame.__ne__, 'foo')
else:
raise nose.SkipTest('test_logical_typeerror not tested on PY3')
def test_logical_with_nas(self):
d = DataFrame({'a': [np.nan, False], 'b': [True, True]})
# GH4947
# bool comparisons should return bool
result = d['a'] | d['b']
expected = Series([False, True])
assert_series_equal(result, expected)
# GH4604, automatic casting here
result = d['a'].fillna(False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
result = d['a'].fillna(False, downcast=False) | d['b']
expected = Series([True, True])
assert_series_equal(result, expected)
def test_neg(self):
# what to do?
assert_frame_equal(-self.frame, -1 * self.frame)
def test_invert(self):
assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))
def test_arith_flex_frame(self):
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']
if not compat.PY3:
aliases = {}
else:
aliases = {'div': 'truediv'}
for op in ops:
try:
alias = aliases.get(op, op)
f = getattr(operator, alias)
result = getattr(self.frame, op)(2 * self.frame)
exp = f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='object', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
# rops
r_f = lambda x, y: f(y, x)
result = getattr(self.frame, 'r' + op)(2 * self.frame)
exp = r_f(self.frame, 2 * self.frame)
assert_frame_equal(result, exp)
# vs mix float
result = getattr(self.mixed_float, op)(
2 * self.mixed_float)
exp = f(self.mixed_float, 2 * self.mixed_float)
assert_frame_equal(result, exp)
_check_mixed_float(result, dtype=dict(C=None))
result = getattr(self.intframe, op)(2 * self.intframe)
exp = f(self.intframe, 2 * self.intframe)
assert_frame_equal(result, exp)
# vs mix int
if op in ['add', 'sub', 'mul']:
result = getattr(self.mixed_int, op)(
2 + self.mixed_int)
exp = f(self.mixed_int, 2 + self.mixed_int)
# overflow in the uint
dtype = None
if op in ['sub']:
dtype = dict(B='object', C=None)
elif op in ['add', 'mul']:
dtype = dict(C=None)
assert_frame_equal(result, exp)
_check_mixed_int(result, dtype=dtype)
except:
printing.pprint_thing("Failing operation %r" % op)
raise
# ndim >= 3
ndim_5 = np.ones(self.frame.shape + (3, 4, 5))
msg = "Unable to coerce to Series/DataFrame"
with assertRaisesRegexp(ValueError, msg):
f(self.frame, ndim_5)
with assertRaisesRegexp(ValueError, msg):
getattr(self.frame, op)(ndim_5)
# res_add = self.frame.add(self.frame)
# res_sub = self.frame.sub(self.frame)
# res_mul = self.frame.mul(self.frame)
# res_div = self.frame.div(2 * self.frame)
# assert_frame_equal(res_add, self.frame + self.frame)
# assert_frame_equal(res_sub, self.frame - self.frame)
# assert_frame_equal(res_mul, self.frame * self.frame)
# assert_frame_equal(res_div, self.frame / (2 * self.frame))
const_add = self.frame.add(1)
assert_frame_equal(const_add, self.frame + 1)
# corner cases
result = self.frame.add(self.frame[:0])
assert_frame_equal(result, self.frame * np.nan)
result = self.frame[:0].add(self.frame)
assert_frame_equal(result, self.frame * np.nan)
with assertRaisesRegexp(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], fill_value=3)
with assertRaisesRegexp(NotImplementedError, 'fill_value'):
self.frame.add(self.frame.iloc[0], axis='index', fill_value=3)
def test_binary_ops_align(self):
# test aligning binary ops
# GH 6681
index = MultiIndex.from_product([list('abc'),
['one', 'two', 'three'],
[1, 2, 3]],
names=['first', 'second', 'third'])
df = DataFrame(np.arange(27 * 3).reshape(27, 3),
index=index,
columns=['value1', 'value2', 'value3']).sortlevel()
idx = pd.IndexSlice
for op in ['add', 'sub', 'mul', 'div', 'truediv']:
opa = getattr(operator, op, None)
if opa is None:
continue
x = Series([1.0, 10.0, 100.0], [1, 2, 3])
result = getattr(df, op)(x, level='third', axis=0)
expected = pd.concat([opa(df.loc[idx[:, :, i], :], v)
for i, v in x.iteritems()]).sortlevel()
assert_frame_equal(result, expected)
x = Series([1.0, 10.0], ['two', 'three'])
result = getattr(df, op)(x, level='second', axis=0)
expected = (pd.concat([opa(df.loc[idx[:, i], :], v)
for i, v in x.iteritems()])
.reindex_like(df).sortlevel())
assert_frame_equal(result, expected)
# GH9463 (alignment level of dataframe with series)
midx = MultiIndex.from_product([['A', 'B'], ['a', 'b']])
df = DataFrame(np.ones((2, 4), dtype='int64'), columns=midx)
s = pd.Series({'a': 1, 'b': 2})
df2 = df.copy()
df2.columns.names = ['lvl0', 'lvl1']
s2 = s.copy()
s2.index.name = 'lvl1'
# different cases of integer/string level names:
res1 = df.mul(s, axis=1, level=1)
res2 = df.mul(s2, axis=1, level=1)
res3 = df2.mul(s, axis=1, level=1)
res4 = df2.mul(s2, axis=1, level=1)
res5 = df2.mul(s, axis=1, level='lvl1')
res6 = df2.mul(s2, axis=1, level='lvl1')
exp = DataFrame(np.array([[1, 2, 1, 2], [1, 2, 1, 2]], dtype='int64'),
columns=midx)
for res in [res1, res2]:
assert_frame_equal(res, exp)
exp.columns.names = ['lvl0', 'lvl1']
for res in [res3, res4, res5, res6]:
assert_frame_equal(res, exp)
def test_arith_mixed(self):
left = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 2, 3]})
result = left + left
expected = DataFrame({'A': ['aa', 'bb', 'cc'],
'B': [2, 4, 6]})
assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
self._test_op(df, operator.add)
self._test_op(df, operator.sub)
self._test_op(df, operator.mul)
self._test_op(df, operator.truediv)
self._test_op(df, operator.floordiv)
self._test_op(df, operator.pow)
self._test_op(df, lambda x, y: y + x)
self._test_op(df, lambda x, y: y - x)
self._test_op(df, lambda x, y: y * x)
self._test_op(df, lambda x, y: y / x)
self._test_op(df, lambda x, y: y ** x)
self._test_op(df, lambda x, y: x + y)
self._test_op(df, lambda x, y: x - y)
self._test_op(df, lambda x, y: x * y)
self._test_op(df, lambda x, y: x / y)
self._test_op(df, lambda x, y: x ** y)
@staticmethod
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
assert_series_equal(result[col], op(df[col], 1))
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = DataFrame(data)
other = DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.ix[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
assert_frame_equal(rs, xp)
# DataFrame
self.assertTrue(df.eq(df).values.all())
self.assertFalse(df.ne(df).values.any())
for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
assert_frame_equal(f(other.values), o(df, other.values))
# scalar
assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
assert_frame_equal(f(np.nan), o(df, np.nan))
with assertRaisesRegexp(ValueError, msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
assert_frame_equal(col_eq, df == Series(col_ser))
assert_frame_equal(col_eq, -col_ne)
assert_frame_equal(idx_eq, -idx_ne)
assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
assert_frame_equal(col_eq, df.eq(list(col_ser)))
assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))
assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
assert_frame_equal(col_gt, df > Series(col_ser))
assert_frame_equal(col_gt, -col_le)
assert_frame_equal(idx_gt, -idx_le)
assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
assert_frame_equal(col_ge, df >= Series(col_ser))
assert_frame_equal(col_ge, -col_lt)
assert_frame_equal(idx_ge, -idx_lt)
assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = Series(np.random.randn(5))
col_ser = Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.ix[0, 0] = np.nan
rs = df.eq(df)
self.assertFalse(rs.ix[0, 0])
rs = df.ne(df)
self.assertTrue(rs.ix[0, 0])
rs = df.gt(df)
self.assertFalse(rs.ix[0, 0])
rs = df.lt(df)
self.assertFalse(rs.ix[0, 0])
rs = df.ge(df)
self.assertFalse(rs.ix[0, 0])
rs = df.le(df)
self.assertFalse(rs.ix[0, 0])
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = DataFrame({'a': arr})
df2 = DataFrame({'a': arr2})
rs = df.gt(df2)
self.assertFalse(rs.values.any())
rs = df.ne(df2)
self.assertTrue(rs.values.all())
arr3 = np.array([2j, np.nan, None])
df3 = DataFrame({'a': arr3})
rs = df3.gt(2j)
self.assertFalse(rs.values.any())
# corner, dtype=object
df1 = DataFrame({'col': ['foo', np.nan, 'bar']})
df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})
result = df1.ne(df2)
exp = DataFrame({'col': [False, True, False]})
assert_frame_equal(result, exp)
def test_dti_tz_convert_to_utc(self):
base = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz='UTC')
idx1 = base.tz_convert('Asia/Tokyo')[:2]
idx2 = base.tz_convert('US/Eastern')[1:]
df1 = DataFrame({'A': [1, 2]}, index=idx1)
df2 = DataFrame({'A': [1, 1]}, index=idx2)
exp = DataFrame({'A': [np.nan, 3, np.nan]}, index=base)
assert_frame_equal(df1 + df2, exp)
def test_arith_flex_series(self):
df = self.simple
row = df.xs('a')
col = df['two']
# after arithmetic refactor, add truediv here
ops = ['add', 'sub', 'mul', 'mod']
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
assert_frame_equal(f(row), op(df, row))
assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
assert_frame_equal(df.div(row), df / row)
assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH7325
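        # each row is divided by its value in column 0:
        # 0/0 -> NaN, 1/0 -> inf, 2/2 -> 1.0, 3/2 -> 1.5, 4/4 -> 1.0, 5/4 -> 1.25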
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='int64')
expected = DataFrame([[nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
df = DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='float64')
expected = DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = self.simple
val1 = df.xs('a').values
added = DataFrame(df.values + val1, index=df.index, columns=df.columns)
assert_frame_equal(df + val1, added)
added = DataFrame((df.values.T + val1).T,
index=df.index, columns=df.columns)
assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df['two'])
added = DataFrame(df.values + val2, index=df.index, columns=df.columns)
assert_frame_equal(df + val2, added)
added = DataFrame((df.values.T + val2).T, index=df.index,
columns=df.columns)
assert_frame_equal(df.add(val2, axis='index'), added)
val3 = np.random.rand(*df.shape)
added = DataFrame(df.values + val3, index=df.index, columns=df.columns)
assert_frame_equal(df.add(val3), added)
def test_combineFrame(self):
frame_copy = self.frame.reindex(self.frame.index[::2])
del frame_copy['D']
frame_copy['C'][:5] = nan
added = self.frame + frame_copy
indexer = added['A'].valid().index
exp = (self.frame['A'] * 2).copy()
tm.assert_series_equal(added['A'].valid(), exp.loc[indexer])
exp.loc[~exp.index.isin(indexer)] = np.nan
tm.assert_series_equal(added['A'], exp.loc[added['A'].index])
self.assertTrue(
np.isnan(added['C'].reindex(frame_copy.index)[:5]).all())
# assert(False)
self.assertTrue(np.isnan(added['D']).all())
self_added = self.frame + self.frame
self.assert_index_equal(self_added.index, self.frame.index)
added_rev = frame_copy + self.frame
self.assertTrue(np.isnan(added['D']).all())
self.assertTrue(np.isnan(added_rev['D']).all())
# corner cases
# empty
plus_empty = self.frame + self.empty
self.assertTrue(np.isnan(plus_empty.values).all())
empty_plus = self.empty + self.frame
self.assertTrue(np.isnan(empty_plus.values).all())
empty_empty = self.empty + self.empty
self.assertTrue(empty_empty.empty)
# out of order
reverse = self.frame.reindex(columns=self.frame.columns[::-1])
assert_frame_equal(reverse + self.frame, self.frame * 2)
# mix vs float64, upcast
added = self.frame + self.mixed_float
_check_mixed_float(added, dtype='float64')
added = self.mixed_float + self.frame
_check_mixed_float(added, dtype='float64')
# mix vs mix
added = self.mixed_float + self.mixed_float2
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float2 + self.mixed_float
_check_mixed_float(added, dtype=dict(C=None))
# with int
added = self.frame + self.mixed_int
_check_mixed_float(added, dtype='float64')
def test_combineSeries(self):
# Series
series = self.frame.xs(self.frame.index[0])
added = self.frame + series
for key, s in compat.iteritems(added):
assert_series_equal(s, self.frame[key] + series[key])
larger_series = series.to_dict()
larger_series['E'] = 1
larger_series = Series(larger_series)
larger_added = self.frame + larger_series
for key, s in compat.iteritems(self.frame):
assert_series_equal(larger_added[key], s + series[key])
self.assertIn('E', larger_added)
self.assertTrue(np.isnan(larger_added['E']).all())
# vs mix (upcast) as needed
added = self.mixed_float + series
_check_mixed_float(added, dtype='float64')
added = self.mixed_float + series.astype('float32')
_check_mixed_float(added, dtype=dict(C=None))
added = self.mixed_float + series.astype('float16')
_check_mixed_float(added, dtype=dict(C=None))
        # these raise with numexpr, as we are adding an int64 to an
        # uint64... weird vs int
# added = self.mixed_int + (100*series).astype('int64')
# _check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C =
# 'int64', D = 'int64'))
# added = self.mixed_int + (100*series).astype('int32')
# _check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C =
# 'int32', D = 'int64'))
# TimeSeries
ts = self.tsframe['A']
# 10890
# we no longer allow auto timeseries broadcasting
        # and require explicit broadcasting
added = self.tsframe.add(ts, axis='index')
for key, col in compat.iteritems(self.tsframe):
result = col + ts
assert_series_equal(added[key], result, check_names=False)
self.assertEqual(added[key].name, key)
if col.name == ts.name:
self.assertEqual(result.name, 'A')
else:
self.assertTrue(result.name is None)
smaller_frame = self.tsframe[:-5]
smaller_added = smaller_frame.add(ts, axis='index')
self.assert_index_equal(smaller_added.index, self.tsframe.index)
smaller_ts = ts[:-5]
smaller_added2 = self.tsframe.add(smaller_ts, axis='index')
assert_frame_equal(smaller_added, smaller_added2)
# length 0, result is all-nan
result = self.tsframe.add(ts[:0], axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# Frame is all-nan
result = self.tsframe[:0].add(ts, axis='index')
expected = DataFrame(np.nan, index=self.tsframe.index,
columns=self.tsframe.columns)
assert_frame_equal(result, expected)
# empty but with non-empty index
frame = self.tsframe[:1].reindex(columns=[])
result = frame.mul(ts, axis='index')
self.assertEqual(len(result), len(ts))
def test_combineFunc(self):
result = self.frame * 2
self.assert_numpy_array_equal(result.values, self.frame.values * 2)
# vs mix
result = self.mixed_float * 2
for c, s in compat.iteritems(result):
self.assert_numpy_array_equal(
s.values, self.mixed_float[c].values * 2)
_check_mixed_float(result, dtype=dict(C=None))
result = self.empty * 2
self.assertIs(result.index, self.empty.index)
self.assertEqual(len(result.columns), 0)
def test_comparisons(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
row = self.simple.xs('a')
ndim_5 = np.ones(df1.shape + (1, 1, 1))
def test_comp(func):
result = func(df1, df2)
self.assert_numpy_array_equal(result.values,
func(df1.values, df2.values))
with assertRaisesRegexp(ValueError, 'Wrong number of dimensions'):
func(df1, ndim_5)
result2 = func(self.simple, row)
self.assert_numpy_array_equal(result2.values,
func(self.simple.values, row.values))
result3 = func(self.frame, 0)
self.assert_numpy_array_equal(result3.values,
func(self.frame.values, 0))
with assertRaisesRegexp(ValueError, 'Can only compare '
'identically-labeled DataFrame'):
func(self.simple, self.simple[:2])
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_comparison_protected_from_errstate(self):
missing_df = tm.makeDataFrame()
missing_df.iloc[0]['A'] = np.nan
with np.errstate(invalid='ignore'):
expected = missing_df.values < 0
with np.errstate(invalid='raise'):
result = (missing_df < 0).values
self.assert_numpy_array_equal(result, expected)
def test_string_comparison(self):
df = DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
assert_frame_equal(df[mask_a], df.ix[1:1, :])
assert_frame_equal(df[-mask_a], df.ix[0:0, :])
mask_b = df.b == "foo"
assert_frame_equal(df[mask_b], df.ix[0:0, :])
assert_frame_equal(df[-mask_b], df.ix[1:1, :])
def test_float_none_comparison(self):
df = DataFrame(np.random.randn(8, 3), index=lrange(8),
columns=['A', 'B', 'C'])
self.assertRaises(TypeError, df.__eq__, None)
def test_boolean_comparison(self):
# GH 4576
# boolean comparisons with a tuple/list give unexpected results
df = DataFrame(np.arange(6).reshape((3, 2)))
b = np.array([2, 2])
b_r = np.atleast_2d([2, 2])
b_c = b_r.T
l = (2, 2, 2)
tup = tuple(l)
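        # b and b_r broadcast as a row against the (3, 2) frame, while the
        # column-shaped b_c (2, 1) cannot broadcast against 3 rows and must raise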
# gt
expected = DataFrame([[False, False], [False, True], [True, True]])
result = df > b
assert_frame_equal(result, expected)
result = df.values > b
assert_numpy_array_equal(result, expected.values)
result = df > l
assert_frame_equal(result, expected)
result = df > tup
assert_frame_equal(result, expected)
result = df > b_r
assert_frame_equal(result, expected)
result = df.values > b_r
assert_numpy_array_equal(result, expected.values)
self.assertRaises(ValueError, df.__gt__, b_c)
self.assertRaises(ValueError, df.values.__gt__, b_c)
# ==
expected = DataFrame([[False, False], [True, False], [False, False]])
result = df == b
assert_frame_equal(result, expected)
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
result = df == b_r
assert_frame_equal(result, expected)
result = df.values == b_r
assert_numpy_array_equal(result, expected.values)
self.assertRaises(ValueError, lambda: df == b_c)
self.assertFalse(np.array_equal(df.values, b_c))
# with alignment
df = DataFrame(np.arange(6).reshape((3, 2)),
columns=list('AB'), index=list('abc'))
expected.index = df.index
expected.columns = df.columns
result = df == l
assert_frame_equal(result, expected)
result = df == tup
assert_frame_equal(result, expected)
# not shape compatible
self.assertRaises(ValueError, lambda: df == (2, 2))
self.assertRaises(ValueError, lambda: df == [2, 2])
def test_combineAdd(self):
with tm.assert_produces_warning(FutureWarning):
# trivial
comb = self.frame.combineAdd(self.frame)
assert_frame_equal(comb, self.frame * 2)
# more rigorous
a = DataFrame([[1., nan, nan, 2., nan]],
columns=np.arange(5))
b = DataFrame([[2., 3., nan, 2., 6., nan]],
columns=np.arange(6))
expected = DataFrame([[3., 3., nan, 4., 6., nan]],
columns=np.arange(6))
with tm.assert_produces_warning(FutureWarning):
result = a.combineAdd(b)
assert_frame_equal(result, expected)
with tm.assert_produces_warning(FutureWarning):
result2 = a.T.combineAdd(b.T)
assert_frame_equal(result2, expected.T)
expected2 = a.combine(b, operator.add, fill_value=0.)
assert_frame_equal(expected, expected2)
# corner cases
with tm.assert_produces_warning(FutureWarning):
comb = self.frame.combineAdd(self.empty)
assert_frame_equal(comb, self.frame)
with tm.assert_produces_warning(FutureWarning):
comb = self.empty.combineAdd(self.frame)
assert_frame_equal(comb, self.frame)
# integer corner case
df1 = DataFrame({'x': [5]})
df2 = DataFrame({'x': [1]})
df3 = DataFrame({'x': [6]})
with tm.assert_produces_warning(FutureWarning):
comb = df1.combineAdd(df2)
assert_frame_equal(comb, df3)
# mixed type GH2191
df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})
df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})
with tm.assert_produces_warning(FutureWarning):
rs = df1.combineAdd(df2)
xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})
assert_frame_equal(xp, rs)
# TODO: test integer fill corner?
def test_combineMult(self):
with tm.assert_produces_warning(FutureWarning):
# trivial
comb = self.frame.combineMult(self.frame)
assert_frame_equal(comb, self.frame ** 2)
# corner cases
comb = self.frame.combineMult(self.empty)
assert_frame_equal(comb, self.frame)
comb = self.empty.combineMult(self.frame)
assert_frame_equal(comb, self.frame)
def test_combine_generic(self):
df1 = self.frame
df2 = self.frame.ix[:-5, ['A', 'B', 'C']]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
self.assertTrue(combined['D'].isnull().all())
self.assertTrue(combined2['D'].isnull().all())
chunk = combined.ix[:-5, ['A', 'B', 'C']]
chunk2 = combined2.ix[:-5, ['A', 'B', 'C']]
exp = self.frame.ix[:-5, ['A', 'B', 'C']].reindex_like(chunk) * 2
assert_frame_equal(chunk, exp)
assert_frame_equal(chunk2, exp)
def test_inplace_ops_alignment(self):
# inplace ops / ops alignment
# GH 8511
columns = list('abcdefg')
X_orig = DataFrame(np.arange(10 * len(columns))
.reshape(-1, len(columns)),
columns=columns, index=range(10))
Z = 100 * X_orig.iloc[:, 1:-1].copy()
block1 = list('bedcf')
subs = list('bcdef')
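        # Z spans columns b..f and block1 is a permuted subset of them, so the
        # in-place ops below must align on column labels rather than position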
# add
X = X_orig.copy()
result1 = (X[block1] + Z).reindex(columns=subs)
X[block1] += Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] + Z[block1]).reindex(columns=subs)
X[block1] += Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
# sub
X = X_orig.copy()
result1 = (X[block1] - Z).reindex(columns=subs)
X[block1] -= Z
result2 = X.reindex(columns=subs)
X = X_orig.copy()
result3 = (X[block1] - Z[block1]).reindex(columns=subs)
X[block1] -= Z[block1]
result4 = X.reindex(columns=subs)
assert_frame_equal(result1, result2)
assert_frame_equal(result1, result3)
assert_frame_equal(result1, result4)
def test_inplace_ops_identity(self):
# GH 5104
# make sure that we are actually changing the object
s_orig = Series([1, 2, 3])
df_orig = DataFrame(np.random.randint(0, 5, size=10).reshape(-1, 5))
# no dtype change
s = s_orig.copy()
s2 = s
s += 1
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1, s)
self.assertIs(s, s2)
self.assertIs(s._data, s2._data)
df = df_orig.copy()
df2 = df
df += 1
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1, df)
self.assertIs(df, df2)
self.assertIs(df._data, df2._data)
# dtype change
s = s_orig.copy()
s2 = s
s += 1.5
assert_series_equal(s, s2)
assert_series_equal(s_orig + 1.5, s)
df = df_orig.copy()
df2 = df
df += 1.5
assert_frame_equal(df, df2)
assert_frame_equal(df_orig + 1.5, df)
self.assertIs(df, df2)
self.assertIs(df._data, df2._data)
# mixed dtype
arr = np.random.randint(0, 10, size=5)
df_orig = DataFrame({'A': arr.copy(), 'B': 'foo'})
df = df_orig.copy()
df2 = df
df['A'] += 1
expected = DataFrame({'A': arr.copy() + 1, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
self.assertIs(df._data, df2._data)
df = df_orig.copy()
df2 = df
df['A'] += 1.5
expected = DataFrame({'A': arr.copy() + 1.5, 'B': 'foo'})
assert_frame_equal(df, expected)
assert_frame_equal(df2, expected)
self.assertIs(df._data, df2._data)
def test_alignment_non_pandas(self):
index = ['A', 'B', 'C']
columns = ['X', 'Y', 'Z']
df = pd.DataFrame(np.random.randn(3, 3), index=index, columns=columns)
align = pd.core.ops._align_method_FRAME
for val in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype=np.int64)]:
tm.assert_series_equal(align(df, val, 'index'),
Series([1, 2, 3], index=df.index))
tm.assert_series_equal(align(df, val, 'columns'),
Series([1, 2, 3], index=df.columns))
# length mismatch
msg = 'Unable to coerce to Series, length must be 3: given 2'
for val in [[1, 2], (1, 2), np.array([1, 2])]:
with tm.assertRaisesRegexp(ValueError, msg):
align(df, val, 'index')
with tm.assertRaisesRegexp(ValueError, msg):
align(df, val, 'columns')
val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(align(df, val, 'index'),
DataFrame(val, index=df.index,
columns=df.columns))
tm.assert_frame_equal(align(df, val, 'columns'),
DataFrame(val, index=df.index,
columns=df.columns))
# shape mismatch
msg = 'Unable to coerce to DataFrame, shape must be'
val = np.array([[1, 2, 3], [4, 5, 6]])
with tm.assertRaisesRegexp(ValueError, msg):
align(df, val, 'index')
with tm.assertRaisesRegexp(ValueError, msg):
align(df, val, 'columns')
val = np.zeros((3, 3, 3))
with tm.assertRaises(ValueError):
align(df, val, 'index')
with tm.assertRaises(ValueError):
align(df, val, 'columns')
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
|
""" Test cases for the various commandline utilities. """
import unittest
import tempfile
import io
import os
from unittest.mock import patch
from ppci.cli.asm import asm
from ppci.cli.build import build
from ppci.cli.c3c import c3c
from ppci.cli.cc import cc
from ppci.cli.hexdump import hexdump
from ppci.cli.java import java
from ppci.cli.link import link
from ppci.cli.objdump import objdump
from ppci.cli.objcopy import objcopy
from ppci.cli.ocaml import ocaml
from ppci.cli.opt import opt
from ppci.cli.pascal import pascal
from ppci.cli.yacc import yacc
from ppci import api
from ppci.common import DiagnosticsManager, SourceLocation
from ppci.binutils.objectfile import ObjectFile, Section, Image
from helper_util import relpath, do_long_tests
def new_temp_file(suffix):
""" Generate a new temporary filename """
handle, filename = tempfile.mkstemp(suffix=suffix)
os.close(handle)
return filename
@unittest.skipUnless(do_long_tests('any'), 'skipping slow tests')
class BuildTestCase(unittest.TestCase):
""" Test the build command-line utility """
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_build_command(self, mock_stdout, mock_stderr):
""" Test normal use """
report_file = new_temp_file('.html')
build_file = relpath(
'..', 'examples', 'lm3s6965evb', 'snake', 'build.xml')
build(['-v', '--report', report_file, '-f', build_file])
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
""" Test help function """
with self.assertRaises(SystemExit) as cm:
build(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('build', mock_stdout.getvalue())
@patch('sys.stderr', new_callable=io.StringIO)
def test_invalid_log_level(self, mock_stderr):
""" Test invalid log level """
with self.assertRaises(SystemExit) as cm:
build(['--log', 'blabla'])
self.assertEqual(2, cm.exception.code)
self.assertIn('invalid log_level value', mock_stderr.getvalue())
@unittest.skipUnless(do_long_tests('any'), 'skipping slow tests')
class C3cTestCase(unittest.TestCase):
""" Test the c3c command-line utility """
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_c3c_command_fails(self, mock_stdout, mock_stderr):
c3_file = relpath('..', 'examples', 'snake', 'game.c3')
obj_file = new_temp_file('.obj')
with self.assertRaises(SystemExit) as cm:
c3c(['-m', 'arm', c3_file, '-o', obj_file])
self.assertEqual(1, cm.exception.code)
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
    def test_c3c_command_success(self, mock_stdout, mock_stderr):
""" Capture stdout. Important because it is closed by the command! """
c3_file = relpath('..', 'examples', 'stm32f4', 'bsp.c3')
obj_file = new_temp_file('.obj')
c3c(['-m', 'arm', c3_file, '-o', obj_file])
@patch('sys.stdout', new_callable=io.StringIO)
def test_c3c_command_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
c3c(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('compiler', mock_stdout.getvalue())
class CcTestCase(unittest.TestCase):
""" Test the cc command-line utility """
c_file = relpath('..', 'examples', 'c', 'hello', 'std.c')
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_cc_command(self, mock_stdout, mock_stderr):
""" Capture stdout. Important because it is closed by the command! """
oj_file = new_temp_file('.oj')
cc(['-m', 'arm', self.c_file, '-o', oj_file])
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_cc_command_s(self, mock_stdout, mock_stderr):
""" Capture stdout. Important because it is closed by the command! """
oj_file = new_temp_file('.oj')
cc(['-m', 'arm', '-S', self.c_file, '-o', oj_file])
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_cc_command_e(self, mock_stdout, mock_stderr):
""" Capture stdout. Important because it is closed by the command! """
oj_file = new_temp_file('.oj')
cc(['-m', 'arm', '-E', self.c_file, '-o', oj_file])
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_cc_command_ir(self, mock_stdout, mock_stderr):
""" Capture stdout. Important because it is closed by the command! """
oj_file = new_temp_file('.oj')
cc(['-m', 'arm', '--ir', self.c_file, '-o', oj_file])
@patch('sys.stdout', new_callable=io.StringIO)
def test_cc_command_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
cc(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('compiler', mock_stdout.getvalue())
class PascalTestCase(unittest.TestCase):
""" Test the pascal command-line program """
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_hello(self, mock_stdout, mock_stderr):
""" Compile hello world.pas """
hello_pas = relpath('..', 'examples', 'src', 'pascal', 'hello.pas')
obj_file = new_temp_file('.obj')
pascal(['-m', 'arm', '-o', obj_file, hello_pas])
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
pascal(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('compiler', mock_stdout.getvalue())
class AsmTestCase(unittest.TestCase):
@patch('sys.stderr', new_callable=io.StringIO)
def test_asm_command(self, mock_stderr):
obj_file = new_temp_file('.obj')
src = relpath('..', 'examples', 'avr', 'arduino-blinky', 'boot.asm')
asm(['-m', 'avr', '-o', obj_file, src])
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
asm(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('assemble', mock_stdout.getvalue())
@unittest.skipUnless(do_long_tests('any'), 'skipping slow tests')
class ObjdumpTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
objdump(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('object file', mock_stdout.getvalue())
@patch('sys.stderr', new_callable=io.StringIO)
def test_command(self, mock_stderr):
obj_file = new_temp_file('.obj')
src = relpath('..', 'examples', 'avr', 'arduino-blinky', 'boot.asm')
asm(['-m', 'avr', '-o', obj_file, src])
with patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
objdump([obj_file])
self.assertIn('SECTION', mock_stdout.getvalue())
@unittest.skipUnless(do_long_tests('any'), 'skipping slow tests')
class ObjcopyTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
objcopy(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('format', mock_stdout.getvalue())
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_command(self, mock_stdout, mock_stderr):
obj_file = new_temp_file('.obj')
bin_file = new_temp_file('.bin')
arch = api.get_arch('arm')
obj = ObjectFile(arch)
data = bytes(range(100))
section = Section('.text')
section.add_data(data)
image = Image('code2', 0)
image.sections.append(section)
obj.add_section(section)
obj.add_image(image)
with open(obj_file, 'w') as f:
obj.save(f)
objcopy(['-O', 'bin', '-S', 'code2', obj_file, bin_file])
with open(bin_file, 'rb') as f:
exported_data = f.read()
self.assertEqual(data, exported_data)
class OptimizeCommandTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
opt(['-h'])
self.assertEqual(0, cm.exception.code)
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_optimize_command(self, mock_stdout, mock_stderr):
in_file = relpath('data', 'add.pi')
out = new_temp_file('.ir')
opt([in_file, out])
@unittest.skipUnless(do_long_tests('any'), 'skipping slow tests')
class LinkCommandTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
link(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('obj', mock_stdout.getvalue())
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_command(self, mock_stdout, mock_stderr):
obj1 = new_temp_file('.obj')
obj2 = new_temp_file('.obj')
obj3 = new_temp_file('.obj')
asm_src = relpath('..', 'examples', 'lm3s6965evb', 'startup.asm')
mmap = relpath('..', 'examples', 'lm3s6965evb', 'memlayout.mmap')
c3_srcs = [
relpath('..', 'examples', 'src', 'snake', 'main.c3'),
relpath('..', 'examples', 'src', 'snake', 'game.c3'),
relpath('..', 'librt', 'io.c3'),
relpath('..', 'examples', 'lm3s6965evb', 'bsp.c3'),
]
asm(['-m', 'arm', '--mtune', 'thumb', '-o', obj1, asm_src])
c3c(['-m', 'arm', '--mtune', 'thumb', '-o', obj2] + c3_srcs)
link(
['-o', obj3, '-L', mmap, obj1, obj2])
class YaccTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
yacc(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('Parser generator', mock_stdout.getvalue())
@patch('sys.stdout', new_callable=io.StringIO)
@patch('sys.stderr', new_callable=io.StringIO)
def test_normal_use(self, mock_stdout, mock_stderr):
""" Test normal yacc use """
grammar_file = relpath('..', 'ppci', 'codegen', 'burg.grammar')
file1 = new_temp_file('.py')
yacc([grammar_file, '-o', file1])
with open(file1, 'r') as f:
content = f.read()
self.assertIn('Automatically generated', content)
class JavaTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
java(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('java', mock_stdout.getvalue())
class OcamlTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
ocaml(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('OCaml', mock_stdout.getvalue())
class HexDumpTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_help(self, mock_stdout):
with self.assertRaises(SystemExit) as cm:
hexdump(['-h'])
self.assertEqual(0, cm.exception.code)
self.assertIn('dump', mock_stdout.getvalue())
@patch('sys.stdout', new_callable=io.StringIO)
def test_dump(self, mock_stdout):
bin_file = relpath('..', 'docs', 'logo', 'logo.png')
hexdump([bin_file])
class DiagnosticsTestCase(unittest.TestCase):
@patch('sys.stdout', new_callable=io.StringIO)
def test_error_reporting(self, mock_stdout):
""" Simulate some errors into the diagnostics system """
filename = relpath('..', 'examples', 'src', 'snake', 'game.c3')
diag = DiagnosticsManager()
with open(filename, 'r') as f:
src = f.read()
diag.add_source(filename, src)
diag.error('Test1', SourceLocation(filename, 1, 2, 1))
diag.error('Test2', SourceLocation(filename, 1000, 2, 1))
diag.error('Test2', SourceLocation("other.c", 1000, 2, 1))
diag.error('Test3', None)
diag.print_errors()
def test_error_repr(self):
diag = DiagnosticsManager()
diag.error('A', None)
self.assertTrue(str(diag.diags))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
|
""" pydevd_vars deals with variables:
resolution/conversion to XML.
"""
from pydevd_constants import * #@UnusedWildImport
from types import * #@UnusedWildImport
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys #@Reimport
try:
from urllib import quote
except:
from urllib.parse import quote #@UnresolvedImport
import threading
import pydevd_resolver
import traceback
#-------------------------------------------------------------------------- defining true and false for earlier versions
try:
__setFalse = False
except:
import __builtin__
setattr(__builtin__, 'True', 1)
setattr(__builtin__, 'False', 0)
#------------------------------------------------------------------------------------------------------ class for errors
class VariableError(RuntimeError):pass
class FrameNotFoundError(RuntimeError):pass
#------------------------------------------------------------------------------------------------------ resolvers in map
if not sys.platform.startswith("java"):
typeMap = [
#None means that it should not be treated as a compound variable
        #isinstance does not accept a tuple on some versions of python, so, we must declare it expanded
(type(None), None,),
(int, None),
(float, None),
(complex, None),
(str, None),
(tuple, pydevd_resolver.tupleResolver),
(list, pydevd_resolver.tupleResolver),
(dict, pydevd_resolver.dictResolver),
]
try:
typeMap.append((long, None))
except:
pass #not available on all python versions
try:
typeMap.append((unicode, None))
except:
pass #not available on all python versions
try:
typeMap.append((set, pydevd_resolver.setResolver))
except:
pass #not available on all python versions
try:
typeMap.append((frozenset, pydevd_resolver.setResolver))
except:
pass #not available on all python versions
else: #platform is java
from org.python import core #@UnresolvedImport
typeMap = [
(core.PyNone, None),
(core.PyInteger, None),
(core.PyLong, None),
(core.PyFloat, None),
(core.PyComplex, None),
(core.PyString, None),
(core.PyTuple, pydevd_resolver.tupleResolver),
(core.PyList, pydevd_resolver.tupleResolver),
(core.PyDictionary, pydevd_resolver.dictResolver),
(core.PyStringMap, pydevd_resolver.dictResolver),
]
if hasattr(core, 'PyJavaInstance'):
#Jython 2.5b3 removed it.
typeMap.append((core.PyJavaInstance, pydevd_resolver.instanceResolver))
def getType(o):
""" returns a triple (typeObject, typeString, resolver
resolver != None means that variable is a container,
and should be displayed as a hierarchy.
Use the resolver to get its attributes.
All container objects should have a resolver.
"""
try:
type_object = type(o)
type_name = type_object.__name__
except:
#This happens for org.python.core.InitModule
return 'Unable to get Type', 'Unable to get Type', None
try:
if type_name == 'org.python.core.PyJavaInstance':
return (type_object, type_name, pydevd_resolver.instanceResolver)
if type_name == 'org.python.core.PyArray':
return (type_object, type_name, pydevd_resolver.jyArrayResolver)
for t in typeMap:
if isinstance(o, t[0]):
return (type_object, type_name, t[1])
except:
traceback.print_exc()
#no match return default
return (type_object, type_name, pydevd_resolver.defaultResolver)
try:
from xml.sax.saxutils import escape
def makeValidXmlValue(s):
        return escape(s, {'"':'&quot;'})
except:
#Simple replacement if it's not there.
def makeValidXmlValue(s):
        return s.replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;')
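#e.g. makeValidXmlValue('1 < 2') gives '1 &lt; 2', which is safe inside the
#quoted XML attributes built below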
def varToXML(v, name):
""" single variable or dictionary to xml representation """
type, typeName, resolver = getType(v)
try:
if hasattr(v, '__class__'):
try:
cName = str(v.__class__)
if cName.find('.') != -1:
cName = cName.split('.')[-1]
elif cName.find("'") != -1: #does not have '.' (could be something like <type 'int'>)
cName = cName[cName.index("'") + 1:]
if cName.endswith("'>"):
cName = cName[:-2]
except:
cName = str(v.__class__)
value = '%s: %s' % (cName, v)
else:
value = str(v)
except:
try:
value = repr(v)
except:
value = 'Unable to get repr for %s' % v.__class__
xml = '<var name="%s" type="%s"' % (makeValidXmlValue(name),makeValidXmlValue(typeName))
if value:
#cannot be too big... communication may not handle it.
if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE:
value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
value += '...'
#fix to work with unicode values
try:
if not IS_PY3K:
if isinstance(value, unicode):
value = value.encode('utf-8')
else:
if isinstance(value, bytes):
value = value.encode('utf-8')
except TypeError: #in java, unicode is a function
pass
xmlValue = ' value="%s"' % (makeValidXmlValue(quote(value, '/>_= \t')))
else:
xmlValue = ''
if resolver is not None:
xmlCont = ' isContainer="True"'
else:
xmlCont = ''
return ''.join((xml, xmlValue, xmlCont, ' />\n'))
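#Illustrative sketch (not part of the original module): for a local x = 3 the
#call varToXML(3, 'x') yields roughly '<var name="x" type="int" value="int%3A 3" />'
#plus a trailing newline; container values additionally get isContainer="True".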
if USE_PSYCO_OPTIMIZATION:
try:
import psyco
varToXML = psyco.proxy(varToXML)
except ImportError:
if hasattr(sys, 'exc_clear'): #jython does not have it
sys.exc_clear() #don't keep the traceback -- clients don't want to see it
def frameVarsToXML(frame):
""" dumps frame variables to XML
<var name="var_name" scope="local" type="type" value="value"/>
"""
xml = ""
keys = frame.f_locals.keys()
if hasattr(keys, 'sort'):
keys.sort() #Python 3.0 does not have it
else:
keys = sorted(keys) #Jython 2.1 does not have it
for k in keys:
try:
v = frame.f_locals[k]
xml += varToXML(v, str(k))
except Exception:
traceback.print_exc()
sys.stderr.write("Unexpected error, recovered safely.\n")
return xml
def iterFrames(initialFrame):
'''NO-YIELD VERSION: Iterates through all the frames starting at the specified frame (which will be the first returned item)'''
#cannot use yield
frames = []
while initialFrame is not None:
frames.append(initialFrame)
initialFrame = initialFrame.f_back
return frames
def dumpFrames(thread_id):
sys.stdout.write('dumping frames\n')
if thread_id != GetThreadId(threading.currentThread()) :
raise VariableError("findFrame: must execute on same thread")
curFrame = GetFrame()
for frame in iterFrames(curFrame):
sys.stdout.write('%s\n' % id(frame))
#===============================================================================
# AdditionalFramesContainer
#===============================================================================
class AdditionalFramesContainer:
lock = threading.Lock()
additional_frames = {} #dict of dicts
def addAdditionalFrameById(thread_id, frames_by_id):
AdditionalFramesContainer.additional_frames[thread_id] = frames_by_id
def removeAdditionalFrameById(thread_id):
del AdditionalFramesContainer.additional_frames[thread_id]
def findFrame(thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
if thread_id != GetThreadId(threading.currentThread()) :
raise VariableError("findFrame: must execute on same thread")
lookingFor = int(frame_id)
if AdditionalFramesContainer.additional_frames:
if DictContains(AdditionalFramesContainer.additional_frames, thread_id):
frame = AdditionalFramesContainer.additional_frames[thread_id].get(lookingFor)
if frame is not None:
return frame
curFrame = GetFrame()
if frame_id == "*":
return curFrame # any frame is specified with "*"
frameFound = None
for frame in iterFrames(curFrame):
if lookingFor == id(frame):
frameFound = frame
del frame
break
del frame
#Important: python can hold a reference to the frame from the current context
#if an exception is raised, so, if we don't explicitly add those deletes
#we might have those variables living much more than we'd want to.
#I.e.: sys.exc_info holding reference to frame that raises exception (so, other places
#need to call sys.exc_clear())
del curFrame
if frameFound is None:
msgFrames = ''
i = 0
for frame in iterFrames(GetFrame()):
i += 1
msgFrames += str(id(frame))
if i % 5 == 0:
msgFrames += '\n'
else:
msgFrames += ' - '
errMsg = '''findFrame: frame not found.
Looking for thread_id:%s, frame_id:%s
Current thread_id:%s, available frames:
%s
''' % (thread_id, lookingFor, GetThreadId(threading.currentThread()), msgFrames)
raise FrameNotFoundError(errMsg)
return frameFound
def resolveCompoundVariable(thread_id, frame_id, scope, attrs):
""" returns the value of the compound variable as a dictionary"""
frame = findFrame(thread_id, frame_id)
attrList = attrs.split('\t')
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
var = frame.f_locals
for k in attrList:
type, _typeName, resolver = getType(var)
var = resolver.resolve(var, k)
try:
type, _typeName, resolver = getType(var)
return resolver.getDictionary(var)
except:
traceback.print_exc()
def evaluateExpression(thread_id, frame_id, expression, doExec):
'''returns the result of the evaluated expression
@param doExec: determines if we should do an exec or an eval
'''
frame = findFrame(thread_id, frame_id)
expression = expression.replace('@LINE@', '\n')
#Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
#(Names not resolved in generator expression in method)
#See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = dict()
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) #locals later because it has precedence over the actual globals
try:
if doExec:
try:
#try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
#it will have whatever the user actually did)
compiled = compile(expression, '<string>', 'eval')
except:
exec(expression, updated_globals, frame.f_locals)
else:
result = eval(compiled, updated_globals, frame.f_locals)
if result is not None: #Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
result = None
try:
result = eval(expression, updated_globals, frame.f_locals)
except Exception:
s = StringIO()
traceback.print_exc(file=s)
result = s.getvalue()
try:
try:
etype, value, tb = sys.exc_info()
result = value
finally:
etype = value = tb = None
except:
pass
return result
finally:
#Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del frame
def changeAttrExpression(thread_id, frame_id, attr, expression):
'''Changes some attribute in a given frame.
@note: it will not (currently) work if we're not in the topmost frame (that's a python
deficiency -- and it appears that there is no way of making it currently work --
will probably need some change to the python internals)
'''
frame = findFrame(thread_id, frame_id)
try:
expression = expression.replace('@LINE@', '\n')
#tests (needs proposed patch in python accepted)
# if hasattr(frame, 'savelocals'):
# if attr in frame.f_locals:
# frame.f_locals[attr] = eval(expression, frame.f_globals, frame.f_locals)
# frame.savelocals()
# return
#
# elif attr in frame.f_globals:
# frame.f_globals[attr] = eval(expression, frame.f_globals, frame.f_locals)
# return
if attr[:7] == "Globals":
attr = attr[8:]
if attr in frame.f_globals:
frame.f_globals[attr] = eval(expression, frame.f_globals, frame.f_locals)
else:
#default way (only works for changing it in the topmost frame)
exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
except Exception:
traceback.print_exc()
|
|
from __future__ import unicode_literals
import boto
import sure # noqa
from nose.tools import assert_raises, assert_equals, assert_not_equals
from boto.exception import BotoServerError
import base64
from moto import mock_iam
from nose.tools import raises
@mock_iam()
def test_get_all_server_certs():
conn = boto.connect_iam()
conn.upload_server_cert("certname", "certbody", "privatekey")
certs = conn.get_all_server_certs()['list_server_certificates_response']['list_server_certificates_result']['server_certificate_metadata_list']
certs.should.have.length_of(1)
cert1 = certs[0]
cert1.server_certificate_name.should.equal("certname")
cert1.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname")
@mock_iam()
def test_get_server_cert():
conn = boto.connect_iam()
conn.upload_server_cert("certname", "certbody", "privatekey")
cert = conn.get_server_certificate("certname")
cert.server_certificate_name.should.equal("certname")
cert.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname")
@mock_iam()
def test_upload_server_cert():
conn = boto.connect_iam()
conn.upload_server_cert("certname", "certbody", "privatekey")
cert = conn.get_server_certificate("certname")
cert.server_certificate_name.should.equal("certname")
cert.arn.should.equal("arn:aws:iam::123456789012:server-certificate/certname")
@mock_iam()
@raises(BotoServerError)
def test_get_role__should_throw__when_role_does_not_exist():
conn = boto.connect_iam()
conn.get_role('unexisting_role')
@mock_iam()
def test_create_role_and_instance_profile():
conn = boto.connect_iam()
conn.create_instance_profile("my-profile", path="my-path")
conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path")
conn.add_role_to_instance_profile("my-profile", "my-role")
role = conn.get_role("my-role")
role.path.should.equal("my-path")
role.assume_role_policy_document.should.equal("some policy")
profile = conn.get_instance_profile("my-profile")
profile.path.should.equal("my-path")
role_from_profile = list(profile.roles.values())[0]
role_from_profile['role_id'].should.equal(role.role_id)
role_from_profile['role_name'].should.equal("my-role")
conn.list_roles().roles[0].role_name.should.equal('my-role')
@mock_iam()
def test_remove_role_from_instance_profile():
conn = boto.connect_iam()
conn.create_instance_profile("my-profile", path="my-path")
conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path")
conn.add_role_to_instance_profile("my-profile", "my-role")
profile = conn.get_instance_profile("my-profile")
role_from_profile = list(profile.roles.values())[0]
role_from_profile['role_name'].should.equal("my-role")
conn.remove_role_from_instance_profile("my-profile", "my-role")
profile = conn.get_instance_profile("my-profile")
dict(profile.roles).should.be.empty
@mock_iam()
def test_list_instance_profiles():
conn = boto.connect_iam()
conn.create_instance_profile("my-profile", path="my-path")
conn.create_role("my-role", path="my-path")
conn.add_role_to_instance_profile("my-profile", "my-role")
profiles = conn.list_instance_profiles().instance_profiles
len(profiles).should.equal(1)
profiles[0].instance_profile_name.should.equal("my-profile")
profiles[0].roles.role_name.should.equal("my-role")
@mock_iam()
def test_list_instance_profiles_for_role():
conn = boto.connect_iam()
conn.create_role(role_name="my-role", assume_role_policy_document="some policy", path="my-path")
conn.create_role(role_name="my-role2", assume_role_policy_document="some policy2", path="my-path2")
profile_name_list = ['my-profile', 'my-profile2']
profile_path_list = ['my-path', 'my-path2']
for profile_count in range(0, 2):
conn.create_instance_profile(profile_name_list[profile_count], path=profile_path_list[profile_count])
for profile_count in range(0, 2):
conn.add_role_to_instance_profile(profile_name_list[profile_count], "my-role")
profile_dump = conn.list_instance_profiles_for_role(role_name="my-role")
profile_list = profile_dump['list_instance_profiles_for_role_response']['list_instance_profiles_for_role_result']['instance_profiles']
for profile_count in range(0, len(profile_list)):
profile_name_list.remove(profile_list[profile_count]["instance_profile_name"])
profile_path_list.remove(profile_list[profile_count]["path"])
profile_list[profile_count]["roles"]["member"]["role_name"].should.equal("my-role")
len(profile_name_list).should.equal(0)
len(profile_path_list).should.equal(0)
profile_dump2 = conn.list_instance_profiles_for_role(role_name="my-role2")
profile_list = profile_dump2['list_instance_profiles_for_role_response']['list_instance_profiles_for_role_result']['instance_profiles']
len(profile_list).should.equal(0)
@mock_iam()
def test_list_role_policies():
conn = boto.connect_iam()
conn.create_role("my-role")
conn.put_role_policy("my-role", "test policy", "my policy")
role = conn.list_role_policies("my-role")
role.policy_names[0].should.equal("test policy")
@mock_iam()
def test_put_role_policy():
conn = boto.connect_iam()
conn.create_role("my-role", assume_role_policy_document="some policy", path="my-path")
conn.put_role_policy("my-role", "test policy", "my policy")
policy = conn.get_role_policy("my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name']
policy.should.equal("test policy")
@mock_iam()
def test_update_assume_role_policy():
conn = boto.connect_iam()
role = conn.create_role("my-role")
conn.update_assume_role_policy(role.role_name, "my-policy")
role = conn.get_role("my-role")
role.assume_role_policy_document.should.equal("my-policy")
@mock_iam()
def test_create_user():
conn = boto.connect_iam()
conn.create_user('my-user')
with assert_raises(BotoServerError):
conn.create_user('my-user')
@mock_iam()
def test_get_user():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.get_user('my-user')
conn.create_user('my-user')
conn.get_user('my-user')
@mock_iam()
def test_create_login_profile():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.create_login_profile('my-user', 'my-pass')
conn.create_user('my-user')
conn.create_login_profile('my-user', 'my-pass')
with assert_raises(BotoServerError):
conn.create_login_profile('my-user', 'my-pass')
@mock_iam()
def test_create_access_key():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.create_access_key('my-user')
conn.create_user('my-user')
conn.create_access_key('my-user')
@mock_iam()
def test_get_all_access_keys():
conn = boto.connect_iam()
conn.create_user('my-user')
response = conn.get_all_access_keys('my-user')
assert_equals(
response['list_access_keys_response']['list_access_keys_result']['access_key_metadata'],
[]
)
conn.create_access_key('my-user')
response = conn.get_all_access_keys('my-user')
assert_not_equals(
response['list_access_keys_response']['list_access_keys_result']['access_key_metadata'],
[]
)
@mock_iam()
def test_delete_access_key():
conn = boto.connect_iam()
conn.create_user('my-user')
access_key_id = conn.create_access_key('my-user')['create_access_key_response']['create_access_key_result']['access_key']['access_key_id']
conn.delete_access_key(access_key_id, 'my-user')
@mock_iam()
def test_delete_user():
conn = boto.connect_iam()
with assert_raises(BotoServerError):
conn.delete_user('my-user')
conn.create_user('my-user')
conn.delete_user('my-user')
@mock_iam()
def test_generate_credential_report():
conn = boto.connect_iam()
result = conn.generate_credential_report()
result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('STARTED')
result = conn.generate_credential_report()
result['generate_credential_report_response']['generate_credential_report_result']['state'].should.equal('COMPLETE')
@mock_iam()
def test_get_credential_report():
conn = boto.connect_iam()
conn.create_user('my-user')
with assert_raises(BotoServerError):
conn.get_credential_report()
result = conn.generate_credential_report()
while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE':
result = conn.generate_credential_report()
result = conn.get_credential_report()
report = base64.b64decode(result['get_credential_report_response']['get_credential_report_result']['content'].encode('ascii')).decode('ascii')
report.should.match(r'.*my-user.*')
|
|
# -*- coding: utf-8 -*-
"""Calendar is a dictionary like Python object that can render itself as VCAL
files according to rfc2445.
These are the defined components.
"""
from datetime import datetime
from icalendar.caselessdict import CaselessDict
from icalendar.parser import Contentline
from icalendar.parser import Contentlines
from icalendar.parser import Parameters
from icalendar.parser import q_join
from icalendar.parser import q_split
from icalendar.parser_tools import DEFAULT_ENCODING
from icalendar.parser_tools import data_encode
from icalendar.prop import TypesFactory
from icalendar.prop import vText, vDDDLists
import pytz
######################################
# The component factory
class ComponentFactory(CaselessDict):
"""All components defined in rfc 2445 are registered in this factory class.
To get a component you can use it like this.
"""
def __init__(self, *args, **kwargs):
"""Set keys to upper for initial dict.
"""
super(ComponentFactory, self).__init__(*args, **kwargs)
self['VEVENT'] = Event
self['VTODO'] = Todo
self['VJOURNAL'] = Journal
self['VFREEBUSY'] = FreeBusy
self['VTIMEZONE'] = Timezone
self['STANDARD'] = TimezoneStandard
self['DAYLIGHT'] = TimezoneDaylight
self['VALARM'] = Alarm
self['VCALENDAR'] = Calendar
# These properties have multiple property values inlined in one property line
# separated by comma. Use CaselessDict as simple caseless set.
INLINE = CaselessDict({
'CATEGORIES': 1,
'RESOURCES': 1,
'FREEBUSY': 1,
})
_marker = []
class Component(CaselessDict):
"""Component is the base object for calendar, Event and the other
    components defined in RFC 2445. Normally you will not use this class
    directly, but rather one of the subclasses.
"""
name = '' # must be defined in each component
required = () # These properties are required
singletons = () # These properties must only appear once
multiple = () # may occur more than once
exclusive = () # These properties are mutually exclusive
inclusive = () # if any occurs the other(s) MUST occur
# ('duration', 'repeat')
ignore_exceptions = False # if True, and we cannot parse this
# component, we will silently ignore
# it, rather than let the exception
# propagate upwards
# not_compliant = [''] # List of non-compliant properties.
def __init__(self, *args, **kwargs):
"""Set keys to upper for initial dict.
"""
super(Component, self).__init__(*args, **kwargs)
# set parameters here for properties that use non-default values
self.subcomponents = [] # Components can be nested.
self.is_broken = False # True if we ignored an exception while
# parsing a property
#def is_compliant(self, name):
# """Returns True is the given property name is compliant with the
# icalendar implementation.
#
# If the parser is too strict it might prevent parsing erroneous but
# otherwise compliant properties. So the parser is pretty lax, but it is
# possible to test for non-complience by calling this method.
# """
# return name in not_compliant
#############################
# handling of property values
def _encode(self, name, value, parameters=None, encode=1):
"""Encode values to icalendar property values.
:param name: Name of the property.
:type name: string
        :param value: Value of the property. Either a basic Python type or
                      any of the icalendar's own property types.
:type value: Python native type or icalendar property type.
:param parameters: Property parameter dictionary for the value. Only
available, if encode is set to True.
:type parameters: Dictionary
:param encode: True, if the value should be encoded to one of
icalendar's own property types (Fallback is "vText")
or False, if not.
:type encode: Boolean
:returns: icalendar property value
"""
if not encode:
return value
if isinstance(value, types_factory.all_types):
# Don't encode already encoded values.
return value
klass = types_factory.for_property(name)
obj = klass(value)
if parameters:
if isinstance(parameters, dict):
params = Parameters()
for key, item in parameters.items():
params[key] = item
parameters = params
assert isinstance(parameters, Parameters)
obj.params = parameters
return obj
def add(self, name, value, parameters=None, encode=1):
"""Add a property.
:param name: Name of the property.
:type name: string
        :param value: Value of the property. Either a basic Python type or
                      any of the icalendar's own property types.
:type value: Python native type or icalendar property type.
:param parameters: Property parameter dictionary for the value. Only
available, if encode is set to True.
:type parameters: Dictionary
:param encode: True, if the value should be encoded to one of
icalendar's own property types (Fallback is "vText")
or False, if not.
:type encode: Boolean
:returns: None
"""
if isinstance(value, datetime) and\
name.lower() in ('dtstamp', 'created', 'last-modified'):
# RFC expects UTC for those... force value conversion.
if getattr(value, 'tzinfo', False) and value.tzinfo is not None:
value = value.astimezone(pytz.utc)
else:
# assume UTC for naive datetime instances
value = pytz.utc.localize(value)
# encode value
if encode and isinstance(value, list) \
and name.lower() not in ['rdate', 'exdate']:
# Individually convert each value to an ical type except rdate and
# exdate, where lists of dates might be passed to vDDDLists.
value = [self._encode(name, v, parameters, encode) for v in value]
else:
value = self._encode(name, value, parameters, encode)
# set value
if name in self:
# If property already exists, append it.
oldval = self[name]
if isinstance(oldval, list):
if isinstance(value, list):
value = oldval + value
else:
oldval.append(value)
value = oldval
else:
value = [oldval, value]
self[name] = value
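    # Rough usage sketch (assumed, not part of the original source):
    #   event = Event()
    #   event.add('summary', 'Python meeting')
    #   event.add('dtstart', datetime(2021, 1, 1, 10, 0))
    # The value is wrapped in the matching icalendar property type; naive
    # datetimes are treated as UTC only for dtstamp/created/last-modified.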
def _decode(self, name, value):
"""Internal for decoding property values.
"""
# TODO: Currently the decoded method calls the icalendar.prop instances
# from_ical. We probably want to decode properties into Python native
# types here. But when parsing from an ical string with from_ical, we
# want to encode the string into a real icalendar.prop property.
if isinstance(value, vDDDLists):
# TODO: Workaround unfinished decoding
return value
decoded = types_factory.from_ical(name, value)
# TODO: remove when proper decoded is implemented in every prop.* class
# Workaround to decode vText properly
if isinstance(decoded, vText):
decoded = decoded.encode(DEFAULT_ENCODING)
return decoded
def decoded(self, name, default=_marker):
"""Returns decoded value of property.
"""
# XXX: fail. what's this function supposed to do in the end?
# -rnix
if name in self:
value = self[name]
if isinstance(value, list):
return [self._decode(name, v) for v in value]
return self._decode(name, value)
else:
if default is _marker:
raise KeyError(name)
else:
return default
########################################################################
    # Inline values. A few properties have multiple values inlined in one
# property line. These methods are used for splitting and joining these.
def get_inline(self, name, decode=1):
"""Returns a list of values (split on comma).
"""
vals = [v.strip('" ') for v in q_split(self[name])]
if decode:
return [self._decode(name, val) for val in vals]
return vals
def set_inline(self, name, values, encode=1):
"""Converts a list of values into comma seperated string and sets value
to that.
"""
if encode:
values = [self._encode(name, value, encode=1) for value in values]
self[name] = types_factory['inline'](q_join(values))
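    # Rough usage sketch (assumed, not part of the original source):
    #   comp = Component()
    #   comp.set_inline('categories', ['WORK', 'MEETING'])
    #   comp.get_inline('categories', decode=0)   # roughly ['WORK', 'MEETING']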
#########################
# Handling of components
def add_component(self, component):
"""Add a subcomponent to this component.
"""
self.subcomponents.append(component)
def _walk(self, name):
"""Walk to given component.
"""
result = []
if name is None or self.name == name:
result.append(self)
for subcomponent in self.subcomponents:
result += subcomponent._walk(name)
return result
def walk(self, name=None):
"""Recursively traverses component and subcomponents. Returns sequence
of same. If name is passed, only components with name will be returned.
"""
        if name is not None:
name = name.upper()
return self._walk(name)
#####################
# Generation
def property_items(self, recursive=True, sorted=True):
"""Returns properties in this component and subcomponents as:
[(name, value), ...]
"""
vText = types_factory['text']
properties = [('BEGIN', vText(self.name).to_ical())]
if sorted:
property_names = self.sorted_keys()
else:
property_names = self.keys()
for name in property_names:
values = self[name]
if isinstance(values, list):
# normally one property is one line
for value in values:
properties.append((name, value))
else:
properties.append((name, values))
if recursive:
# recursion is fun!
for subcomponent in self.subcomponents:
properties += subcomponent.property_items(sorted=sorted)
properties.append(('END', vText(self.name).to_ical()))
return properties
@classmethod
def from_ical(cls, st, multiple=False):
"""Populates the component recursively from a string.
"""
stack = [] # a stack of components
comps = []
for line in Contentlines.from_ical(st): # raw parsing
if not line:
continue
try:
name, params, vals = line.parts()
except ValueError:
# if unable to parse a line within a component
# that ignores exceptions, mark the component
# as broken and skip the line. otherwise raise.
component = stack[-1] if stack else None
if not component or not component.ignore_exceptions:
raise
component.is_broken = True
continue
uname = name.upper()
# check for start of component
if uname == 'BEGIN':
# try and create one of the components defined in the spec,
                # otherwise get a general Component for robustness.
c_name = vals.upper()
c_class = component_factory.get(c_name, cls)
component = c_class()
if not getattr(component, 'name', ''): # undefined components
component.name = c_name
stack.append(component)
            # check for end of component
elif uname == 'END':
# we are done adding properties to this component
# so pop it from the stack and add it to the new top.
component = stack.pop()
if not stack: # we are at the end
comps.append(component)
else:
if not component.is_broken:
stack[-1].add_component(component)
# we are adding properties to the current top of the stack
else:
factory = types_factory.for_property(name)
component = stack[-1]
datetime_names = ('DTSTART', 'DTEND', 'RECURRENCE-ID', 'DUE',
'FREEBUSY', 'RDATE', 'EXDATE')
try:
if name in datetime_names and 'TZID' in params:
vals = factory(factory.from_ical(vals, params['TZID']))
else:
vals = factory(factory.from_ical(vals))
except ValueError:
if not component.ignore_exceptions:
raise
component.is_broken = True
else:
vals.params = params
component.add(name, vals, encode=0)
if multiple:
return comps
if len(comps) > 1:
raise ValueError('Found multiple components where '
'only one is allowed: {st!r}'.format(**locals()))
if len(comps) < 1:
raise ValueError('Found no components where '
'exactly one is required: '
'{st!r}'.format(**locals()))
return comps[0]
def __repr__(self):
return '%s(%s)' % (self.name, data_encode(self))
def content_line(self, name, value, sorted=True):
"""Returns property as content line.
"""
params = getattr(value, 'params', Parameters())
return Contentline.from_parts(name, params, value, sorted=sorted)
def content_lines(self, sorted=True):
"""Converts the Component and subcomponents into content lines.
"""
contentlines = Contentlines()
for name, value in self.property_items(sorted=sorted):
cl = self.content_line(name, value, sorted=sorted)
contentlines.append(cl)
contentlines.append('') # remember the empty string in the end
return contentlines
def to_ical(self, sorted=True):
'''
:param sorted: Whether parameters and properties should be
lexicographically sorted.
'''
content_lines = self.content_lines(sorted=sorted)
return content_lines.to_ical()
#######################################
# components defined in RFC 2445
class Event(Component):
name = 'VEVENT'
canonical_order = (
'SUMMARY', 'DTSTART', 'DTEND', 'DURATION', 'DTSTAMP',
'UID', 'RECURRENCE-ID', 'SEQUENCE',
'RRULE', 'EXRULE', 'RDATE', 'EXDATE',
)
required = ('UID',)
singletons = (
'CLASS', 'CREATED', 'DESCRIPTION', 'DTSTART', 'GEO', 'LAST-MODIFIED',
'LOCATION', 'ORGANIZER', 'PRIORITY', 'DTSTAMP', 'SEQUENCE', 'STATUS',
'SUMMARY', 'TRANSP', 'URL', 'RECURRENCE-ID', 'DTEND', 'DURATION',
)
exclusive = ('DTEND', 'DURATION', )
multiple = (
'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
'EXRULE', 'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
)
ignore_exceptions = True
class Todo(Component):
name = 'VTODO'
required = ('UID',)
singletons = (
'CLASS', 'COMPLETED', 'CREATED', 'DESCRIPTION', 'DTSTAMP', 'DTSTART',
'GEO', 'LAST-MODIFIED', 'LOCATION', 'ORGANIZER', 'PERCENT', 'PRIORITY',
'RECURRENCE-ID', 'SEQUENCE', 'STATUS', 'SUMMARY', 'UID', 'URL', 'DUE',
'DURATION',
)
exclusive = ('DUE', 'DURATION',)
multiple = (
'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
'EXRULE', 'RSTATUS', 'RELATED', 'RESOURCES', 'RDATE', 'RRULE'
)
class Journal(Component):
name = 'VJOURNAL'
required = ('UID',)
singletons = (
'CLASS', 'CREATED', 'DESCRIPTION', 'DTSTART', 'DTSTAMP',
'LAST-MODIFIED', 'ORGANIZER', 'RECURRENCE-ID', 'SEQUENCE', 'STATUS',
'SUMMARY', 'UID', 'URL',
)
multiple = (
'ATTACH', 'ATTENDEE', 'CATEGORIES', 'COMMENT', 'CONTACT', 'EXDATE',
'EXRULE', 'RELATED', 'RDATE', 'RRULE', 'RSTATUS',
)
class FreeBusy(Component):
name = 'VFREEBUSY'
required = ('UID',)
singletons = (
'CONTACT', 'DTSTART', 'DTEND', 'DURATION', 'DTSTAMP', 'ORGANIZER',
'UID', 'URL',
)
multiple = ('ATTENDEE', 'COMMENT', 'FREEBUSY', 'RSTATUS',)
class Timezone(Component):
name = 'VTIMEZONE'
canonical_order = ('TZID', 'STANDARD', 'DAYLIGHT',)
required = ('TZID', 'STANDARD', 'DAYLIGHT',)
singletons = ('TZID', 'LAST-MODIFIED', 'TZURL',)
class TimezoneStandard(Component):
name = 'STANDARD'
required = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM')
singletons = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM', 'RRULE')
multiple = ('COMMENT', 'RDATE', 'TZNAME')
class TimezoneDaylight(Component):
name = 'DAYLIGHT'
required = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM')
singletons = ('DTSTART', 'TZOFFSETTO', 'TZOFFSETFROM', 'RRULE')
multiple = ('COMMENT', 'RDATE', 'TZNAME')
class Alarm(Component):
name = 'VALARM'
# not quite sure about these ...
required = ('ACTION', 'TRIGGER',)
singletons = ('ATTACH', 'ACTION', 'TRIGGER', 'DURATION', 'REPEAT',)
inclusive = (('DURATION', 'REPEAT',),)
class Calendar(Component):
"""This is the base object for an iCalendar file.
"""
name = 'VCALENDAR'
canonical_order = ('VERSION', 'PRODID', 'CALSCALE', 'METHOD',)
required = ('prodid', 'version', )
singletons = ('prodid', 'version', )
multiple = ('calscale', 'method', )
# These are read-only singletons, so one instance is enough for the module
types_factory = TypesFactory()
component_factory = ComponentFactory()
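# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how the classes above fit together: build a Calendar,
# attach an Event subcomponent, and serialize everything with to_ical().
# It assumes Component.add() (defined earlier in this file) encodes values as
# in the icalendar library; the concrete property values are illustrative.
def _example_build_calendar():
    from datetime import datetime
    cal = Calendar()
    cal.add('prodid', '-//Example Corp//Example Calendar//EN')
    cal.add('version', '2.0')
    event = Event()
    event.add('uid', 'example-1234@example.com')        # required by Event
    event.add('summary', 'Team meeting')
    event.add('dtstart', datetime(2020, 1, 15, 9, 0, 0))
    cal.add_component(event)    # nest the event via Component.add_component()
    return cal.to_ical()        # bytes: 'BEGIN:VCALENDAR ... END:VCALENDAR'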
|
|
import asyncio
import collections
import io
import json
import ssl
import sys
import traceback
import warnings
from collections import namedtuple
from hashlib import md5, sha1, sha256
from http.cookies import CookieError, Morsel, SimpleCookie
from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
from yarl import URL
from . import hdrs, helpers, http, payload
from .client_exceptions import (ClientConnectionError, ClientOSError,
ClientResponseError, ContentTypeError,
InvalidURL)
from .formdata import FormData
from .helpers import HeadersMixin, TimerNoop, noop
from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, PayloadWriter
from .log import client_logger
from .streams import FlowControlStreamReader
try:
import cchardet as chardet
except ImportError: # pragma: no cover
import chardet
__all__ = ('ClientRequest', 'ClientResponse', 'RequestInfo')
RequestInfo = collections.namedtuple(
'RequestInfo', ('url', 'method', 'headers'))
HASHFUNC_BY_DIGESTLEN = {
16: md5,
20: sha1,
32: sha256,
}
_SSL_OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
ConnectionKey = namedtuple('ConnectionKey', ['host', 'port', 'ssl'])
class ClientRequest:
GET_METHODS = {
hdrs.METH_GET,
hdrs.METH_HEAD,
hdrs.METH_OPTIONS,
hdrs.METH_TRACE,
}
POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})
DEFAULT_HEADERS = {
hdrs.ACCEPT: '*/*',
hdrs.ACCEPT_ENCODING: 'gzip, deflate',
}
body = b''
auth = None
response = None
response_class = None
_writer = None # async task for streaming data
_continue = None # waiter future for '100 Continue' response
# N.B.
# Adding a __del__ method that closes self._writer doesn't make sense:
# _writer wraps an instance method, so it keeps a reference to self and
# the finalizer will not be called until the writer has finished.
def __init__(self, method, url, *,
params=None, headers=None, skip_auto_headers=frozenset(),
data=None, cookies=None,
auth=None, version=http.HttpVersion11, compress=None,
chunked=None, expect100=False,
loop=None, response_class=None,
proxy=None, proxy_auth=None,
timer=None, session=None, auto_decompress=True,
verify_ssl=None, fingerprint=None, ssl_context=None,
proxy_headers=None):
if verify_ssl is False and ssl_context is not None:
raise ValueError(
"Either disable ssl certificate validation by "
"verify_ssl=False or specify ssl_context, not both.")
if loop is None:
loop = asyncio.get_event_loop()
assert isinstance(url, URL), url
assert isinstance(proxy, (URL, type(None))), proxy
self._session = session
if params:
q = MultiDict(url.query)
url2 = url.with_query(params)
q.extend(url2.query)
url = url.with_query(q)
self.url = url.with_fragment(None)
self.original_url = url
self.method = method.upper()
self.chunked = chunked
self.compress = compress
self.loop = loop
self.length = None
self.response_class = response_class or ClientResponse
self._timer = timer if timer is not None else TimerNoop()
self._auto_decompress = auto_decompress
self._verify_ssl = verify_ssl
self._ssl_context = ssl_context
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
self.update_version(version)
self.update_host(url)
self.update_headers(headers)
self.update_auto_headers(skip_auto_headers)
self.update_cookies(cookies)
self.update_content_encoding(data)
self.update_auth(auth)
self.update_proxy(proxy, proxy_auth, proxy_headers)
self.update_fingerprint(fingerprint)
self.update_body_from_data(data)
if data or self.method not in self.GET_METHODS:
self.update_transfer_encoding()
self.update_expect_continue(expect100)
@property
def connection_key(self):
return ConnectionKey(self.host, self.port, self.ssl)
@property
def host(self):
return self.url.host
@property
def port(self):
return self.url.port
@property
def request_info(self):
return RequestInfo(self.url, self.method, self.headers)
def update_host(self, url):
"""Update destination host, port and connection type (ssl)."""
# get host/port
if not url.host:
raise InvalidURL(url)
# basic auth info
username, password = url.user, url.password
if username:
self.auth = helpers.BasicAuth(username, password or '')
# Record entire netloc for usage in host header
scheme = url.scheme
self.ssl = scheme in ('https', 'wss')
def update_version(self, version):
"""Convert request version to two elements tuple.
parser HTTP version '1.1' => (1, 1)
"""
if isinstance(version, str):
v = [l.strip() for l in version.split('.', 1)]
try:
version = int(v[0]), int(v[1])
except ValueError:
raise ValueError(
'Can not parse http version number: {}'
.format(version)) from None
self.version = version
def update_headers(self, headers):
"""Update request headers."""
self.headers = CIMultiDict()
if headers:
if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
headers = headers.items()
for key, value in headers:
self.headers.add(key, value)
def update_auto_headers(self, skip_auto_headers):
self.skip_auto_headers = CIMultiDict(
(hdr, None) for hdr in sorted(skip_auto_headers))
used_headers = self.headers.copy()
used_headers.extend(self.skip_auto_headers)
for hdr, val in self.DEFAULT_HEADERS.items():
if hdr not in used_headers:
self.headers.add(hdr, val)
# add host
if hdrs.HOST not in used_headers:
netloc = self.url.raw_host
if not self.url.is_default_port():
netloc += ':' + str(self.url.port)
self.headers[hdrs.HOST] = netloc
if hdrs.USER_AGENT not in used_headers:
self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
def update_cookies(self, cookies):
"""Update request cookies header."""
if not cookies:
return
c = SimpleCookie()
if hdrs.COOKIE in self.headers:
c.load(self.headers.get(hdrs.COOKIE, ''))
del self.headers[hdrs.COOKIE]
for name, value in cookies.items():
if isinstance(value, Morsel):
# Preserve coded_value
mrsl_val = value.get(value.key, Morsel())
mrsl_val.set(value.key, value.value, value.coded_value)
c[name] = mrsl_val
else:
c[name] = value
self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()
def update_content_encoding(self, data):
"""Set request content encoding."""
if not data:
return
enc = self.headers.get(hdrs.CONTENT_ENCODING, '').lower()
if enc:
if self.compress:
raise ValueError(
'compress can not be set '
'if Content-Encoding header is set')
elif self.compress:
if not isinstance(self.compress, str):
self.compress = 'deflate'
self.headers[hdrs.CONTENT_ENCODING] = self.compress
self.chunked = True # enable chunked, no need to deal with length
def update_transfer_encoding(self):
"""Analyze transfer-encoding header."""
te = self.headers.get(hdrs.TRANSFER_ENCODING, '').lower()
if 'chunked' in te:
if self.chunked:
raise ValueError(
'chunked can not be set '
'if "Transfer-Encoding: chunked" header is set')
elif self.chunked:
if hdrs.CONTENT_LENGTH in self.headers:
raise ValueError(
'chunked can not be set '
'if Content-Length header is set')
self.headers[hdrs.TRANSFER_ENCODING] = 'chunked'
else:
if hdrs.CONTENT_LENGTH not in self.headers:
self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
def update_auth(self, auth):
"""Set basic auth."""
if auth is None:
auth = self.auth
if auth is None:
return
if not isinstance(auth, helpers.BasicAuth):
raise TypeError('BasicAuth() tuple is required instead')
self.headers[hdrs.AUTHORIZATION] = auth.encode()
def update_body_from_data(self, body):
if not body:
return
# FormData
if isinstance(body, FormData):
body = body()
try:
body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)
except payload.LookupError:
body = FormData(body)()
self.body = body
# enable chunked encoding if needed
if not self.chunked:
if hdrs.CONTENT_LENGTH not in self.headers:
size = body.size
if size is None:
self.chunked = True
else:
if hdrs.CONTENT_LENGTH not in self.headers:
self.headers[hdrs.CONTENT_LENGTH] = str(size)
# set content-type
if (hdrs.CONTENT_TYPE not in self.headers and
hdrs.CONTENT_TYPE not in self.skip_auto_headers):
self.headers[hdrs.CONTENT_TYPE] = body.content_type
# copy payload headers
if body.headers:
for (key, value) in body.headers.items():
if key not in self.headers:
self.headers[key] = value
def update_expect_continue(self, expect=False):
if expect:
self.headers[hdrs.EXPECT] = '100-continue'
elif self.headers.get(hdrs.EXPECT, '').lower() == '100-continue':
expect = True
if expect:
self._continue = helpers.create_future(self.loop)
def update_proxy(self, proxy, proxy_auth, proxy_headers):
if proxy and not proxy.scheme == 'http':
raise ValueError("Only http proxies are supported")
if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):
raise ValueError("proxy_auth must be None or BasicAuth() tuple")
self.proxy = proxy
self.proxy_auth = proxy_auth
self.proxy_headers = proxy_headers
def update_fingerprint(self, fingerprint):
if fingerprint:
digestlen = len(fingerprint)
hashfunc = HASHFUNC_BY_DIGESTLEN.get(digestlen)
if not hashfunc:
raise ValueError('fingerprint has invalid length')
elif hashfunc is md5 or hashfunc is sha1:
warnings.warn('md5 and sha1 are insecure and deprecated. '
'Use sha256.',
DeprecationWarning, stacklevel=2)
client_logger.warn('md5 and sha1 are insecure and deprecated. '
'Use sha256.')
self._hashfunc = hashfunc
self._fingerprint = fingerprint
@property
def verify_ssl(self):
"""Do check for ssl certifications?"""
return self._verify_ssl
@property
def fingerprint(self):
"""Expected ssl certificate fingerprint."""
return self._fingerprint
@property
def ssl_context(self):
"""SSLContext instance for https requests."""
return self._ssl_context
def keep_alive(self):
if self.version < HttpVersion10:
# keep alive not supported at all
return False
if self.version == HttpVersion10:
if self.headers.get(hdrs.CONNECTION) == 'keep-alive':
return True
else: # no headers means we close for Http 1.0
return False
elif self.headers.get(hdrs.CONNECTION) == 'close':
return False
return True
@asyncio.coroutine
def write_bytes(self, writer, conn):
"""Support coroutines that yields bytes objects."""
# 100 response
if self._continue is not None:
yield from writer.drain()
yield from self._continue
try:
if isinstance(self.body, payload.Payload):
yield from self.body.write(writer)
else:
if isinstance(self.body, (bytes, bytearray)):
self.body = (self.body,)
for chunk in self.body:
writer.write(chunk)
yield from writer.write_eof()
except OSError as exc:
new_exc = ClientOSError(
exc.errno,
'Can not write request body for %s' % self.url)
new_exc.__context__ = exc
new_exc.__cause__ = exc
conn.protocol.set_exception(new_exc)
except asyncio.CancelledError as exc:
if not conn.closed:
conn.protocol.set_exception(exc)
except Exception as exc:
conn.protocol.set_exception(exc)
finally:
self._writer = None
def send(self, conn):
# Specify request target:
# - CONNECT requests must send the authority-form URI
# - non-CONNECT requests through a proxy must send the absolute-form URI
# - the most common case is the origin-form URI
if self.method == hdrs.METH_CONNECT:
path = '{}:{}'.format(self.url.raw_host, self.url.port)
elif self.proxy and not self.ssl:
path = str(self.url)
else:
path = self.url.raw_path
if self.url.raw_query_string:
path += '?' + self.url.raw_query_string
writer = PayloadWriter(conn.writer, self.loop)
if self.compress:
writer.enable_compression(self.compress)
if self.chunked is not None:
writer.enable_chunking()
# set default content-type
if (self.method in self.POST_METHODS and
hdrs.CONTENT_TYPE not in self.skip_auto_headers and
hdrs.CONTENT_TYPE not in self.headers):
self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
# set the connection header
connection = self.headers.get(hdrs.CONNECTION)
if not connection:
if self.keep_alive():
if self.version == HttpVersion10:
connection = 'keep-alive'
else:
if self.version == HttpVersion11:
connection = 'close'
if connection is not None:
self.headers[hdrs.CONNECTION] = connection
# status + headers
status_line = '{0} {1} HTTP/{2[0]}.{2[1]}\r\n'.format(
self.method, path, self.version)
writer.write_headers(status_line, self.headers)
self._writer = asyncio.ensure_future(
self.write_bytes(writer, conn), loop=self.loop)
self.response = self.response_class(
self.method, self.original_url,
writer=self._writer, continue100=self._continue, timer=self._timer,
request_info=self.request_info,
auto_decompress=self._auto_decompress
)
self.response._post_init(self.loop, self._session)
return self.response
@asyncio.coroutine
def close(self):
if self._writer is not None:
try:
yield from self._writer
finally:
self._writer = None
def terminate(self):
if self._writer is not None:
if not self.loop.is_closed():
self._writer.cancel()
self._writer = None
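# --- Hedged sketch (not part of the original module) ---
# Illustrates the header-building done in __init__ above by constructing a
# ClientRequest directly (normally ClientSession does this). The URL and the
# externally supplied event loop are illustrative assumptions.
def _example_client_request(loop):
    req = ClientRequest('get', URL('http://example.com/path?x=1'), loop=loop)
    assert req.method == 'GET'                 # upper-cased in __init__
    assert hdrs.HOST in req.headers            # filled by update_auto_headers()
    assert req.headers[hdrs.ACCEPT] == '*/*'   # from DEFAULT_HEADERS
    return req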
class ClientResponse(HeadersMixin):
# from the Status-Line of the response
version = None # HTTP-Version
status = None # Status-Code
reason = None # Reason-Phrase
content = None # Payload stream
headers = None # Response headers, CIMultiDictProxy
raw_headers = None # Response raw headers, a sequence of pairs
_connection = None # current connection
flow_control_class = FlowControlStreamReader # reader flow control
_reader = None # input stream
_source_traceback = None
# set up by ClientRequest after the ClientResponse object is created;
# the post-init stage avoids changing the ctor signature
_loop = None
_closed = True # to allow __del__ for non-initialized properly response
_session = None
def __init__(self, method, url, *,
writer=None, continue100=None, timer=None,
request_info=None, auto_decompress=True):
assert isinstance(url, URL)
self.method = method
self.headers = None
self.cookies = SimpleCookie()
self._url = url
self._content = None
self._writer = writer
self._continue = continue100
self._closed = True
self._history = ()
self._request_info = request_info
self._timer = timer if timer is not None else TimerNoop()
self._auto_decompress = auto_decompress
@property
def url(self):
return self._url
@property
def url_obj(self):
warnings.warn(
"Deprecated, use .url #1654", DeprecationWarning, stacklevel=2)
return self._url
@property
def host(self):
return self._url.host
@property
def _headers(self):
return self.headers
@property
def request_info(self):
return self._request_info
def _post_init(self, loop, session):
self._loop = loop
self._session = session # store a reference to session #1985
if loop.get_debug():
self._source_traceback = traceback.extract_stack(sys._getframe(1))
def __del__(self, _warnings=warnings):
if self._loop is None:
return # not started
if self._closed:
return
if self._connection is not None:
self._connection.release()
self._cleanup_writer()
# warn
if __debug__:
if self._loop.get_debug():
_warnings.warn("Unclosed response {!r}".format(self),
ResourceWarning)
context = {'client_response': self,
'message': 'Unclosed response'}
if self._source_traceback:
context['source_traceback'] = self._source_traceback
self._loop.call_exception_handler(context)
def __repr__(self):
out = io.StringIO()
ascii_encodable_url = str(self.url)
if self.reason:
ascii_encodable_reason = self.reason.encode('ascii',
'backslashreplace') \
.decode('ascii')
else:
ascii_encodable_reason = self.reason
print('<ClientResponse({}) [{} {}]>'.format(
ascii_encodable_url, self.status, ascii_encodable_reason),
file=out)
print(self.headers, file=out)
return out.getvalue()
@property
def connection(self):
return self._connection
@property
def history(self):
"""A sequence of of responses, if redirects occurred."""
return self._history
@asyncio.coroutine
def start(self, connection, read_until_eof=False):
"""Start response processing."""
self._closed = False
self._protocol = connection.protocol
self._connection = connection
connection.protocol.set_response_params(
timer=self._timer,
skip_payload=self.method.lower() == 'head',
skip_status_codes=(204, 304),
read_until_eof=read_until_eof,
auto_decompress=self._auto_decompress)
with self._timer:
while True:
# read response
try:
(message, payload) = yield from self._protocol.read()
except http.HttpProcessingError as exc:
raise ClientResponseError(
self.request_info, self.history,
code=exc.code,
message=exc.message, headers=exc.headers) from exc
if (message.code < 100 or
message.code > 199 or message.code == 101):
break
if self._continue is not None and not self._continue.done():
self._continue.set_result(True)
self._continue = None
# payload eof handler
payload.on_eof(self._response_eof)
# response status
self.version = message.version
self.status = message.code
self.reason = message.reason
# headers
self.headers = CIMultiDictProxy(message.headers)
self.raw_headers = tuple(message.raw_headers)
# payload
self.content = payload
# cookies
for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):
try:
self.cookies.load(hdr)
except CookieError as exc:
client_logger.warning(
'Can not load response cookies: %s', exc)
return self
def _response_eof(self):
if self._closed:
return
if self._connection is not None:
# websocket, protocol could be None because
# connection could be detached
if (self._connection.protocol is not None and
self._connection.protocol.upgraded):
return
self._connection.release()
self._connection = None
self._closed = True
self._cleanup_writer()
@property
def closed(self):
return self._closed
def close(self):
if self._closed:
return
self._closed = True
if self._loop is None or self._loop.is_closed():
return
if self._connection is not None:
self._connection.close()
self._connection = None
self._cleanup_writer()
self._notify_content()
def release(self):
if self._closed:
return noop()
self._closed = True
if self._connection is not None:
self._connection.release()
self._connection = None
self._cleanup_writer()
self._notify_content()
return noop()
def raise_for_status(self):
if 400 <= self.status:
raise ClientResponseError(
self.request_info,
self.history,
code=self.status,
message=self.reason,
headers=self.headers)
def _cleanup_writer(self):
if self._writer is not None and not self._writer.done():
self._writer.cancel()
self._writer = None
self._session = None
def _notify_content(self):
content = self.content
if content and content.exception() is None and not content.is_eof():
content.set_exception(
ClientConnectionError('Connection closed'))
@asyncio.coroutine
def wait_for_close(self):
if self._writer is not None:
try:
yield from self._writer
finally:
self._writer = None
self.release()
@asyncio.coroutine
def read(self):
"""Read response payload."""
if self._content is None:
try:
self._content = yield from self.content.read()
except Exception:
self.close()
raise
return self._content
def _get_encoding(self):
ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
mimetype = helpers.parse_mimetype(ctype)
encoding = mimetype.parameters.get('charset')
if not encoding:
if mimetype.type == 'application' and mimetype.subtype == 'json':
# RFC 7159 states that the default encoding is UTF-8.
encoding = 'utf-8'
else:
encoding = chardet.detect(self._content)['encoding']
if not encoding:
encoding = 'utf-8'
return encoding
@asyncio.coroutine
def text(self, encoding=None, errors='strict'):
"""Read response payload and decode."""
if self._content is None:
yield from self.read()
if encoding is None:
encoding = self._get_encoding()
return self._content.decode(encoding, errors=errors)
@asyncio.coroutine
def json(self, *, encoding=None, loads=json.loads,
content_type='application/json'):
"""Read and decodes JSON response."""
if self._content is None:
yield from self.read()
if content_type:
ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
if content_type not in ctype:
raise ContentTypeError(
self.request_info,
self.history,
message=('Attempt to decode JSON with '
'unexpected mimetype: %s' % ctype),
headers=self.headers)
stripped = self._content.strip()
if not stripped:
return None
if encoding is None:
encoding = self._get_encoding()
return loads(stripped.decode(encoding))
@asyncio.coroutine
def __aenter__(self):
return self
@asyncio.coroutine
def __aexit__(self, exc_type, exc_val, exc_tb):
# Similar to _RequestContextManager, we do not need to check
# for exceptions: the response object can close the connection
# if its state is broken.
self.release()
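# --- Hedged usage sketch (not part of the original module) ---
# The classes above are normally driven through aiohttp.ClientSession; this
# coroutine assumes `session` is such a session, so the object it yields is a
# ClientResponse and raise_for_status()/json()/release() are the methods
# defined above. The URL handling is illustrative only.
@asyncio.coroutine
def _example_fetch_json(session, url):
    resp = yield from session.get(url)
    try:
        resp.raise_for_status()         # raises ClientResponseError on 4xx/5xx
        data = yield from resp.json()   # falls back to _get_encoding() if needed
    finally:
        resp.release()                  # return the connection to the pool
    return data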
|
|
import time
def weightprice(ok,hk,lk,ck,one,add=0):
    # weighted average of one bar's o/h/l/c prices with weights ok/hk/lk/ck,
    # plus an optional constant offset `add`
    return add+(ok*one['o']+hk*one['h']+lk*one['l']+ck*one['c'])/(ok+hk+lk+ck)
funcs = {
'wprice':weightprice,
}
class SVG:
height = 400.0 # 400.0
block = 10 # 8
width = 6 # 6
half = 3 # 3
center = 5 # 4
border = 1 # 1
margin = 10 # 10
up = (220,0,0)
dn = (0,128,0)
eq = (74,182,198)
red = (255,0,0)
yellow = (255,255,0)
green = (0,200,0)
blue = (0,0,255)
grey = (100,100,100)
grey2 = (200,200,200)
timecolor = [(255, 255, 0, ),
(128, 128, 0, ),
(0, 255, 255, ),
(255, 0, 255, ),
(0, 128, 128, ),
(128, 0, 128, )]
line_keys = None
lines = {
"see":[
(("signal",), eq, True, "ss"),
(("uuu",), red, True, "ss"),
(("nnn",), green, True, "ss"),
(("bu",), red, False, "ohlc"),
(("bn",), green, False, "ohlc"),
# (("wprice",(1,2,3,4),(('add',10),)), red, False, "ohlc"),
# (("line",100), green, True, "ss"),
],
"only":[
(("point",), grey, False, "ohlc"),
],
}
def get_lines(self):
if self.line_keys:
return self.line_keys
else:
self.line_keys = sorted(self.lines.keys())
return self.line_keys
def to_html(self):
self.make_k()
out = []
n = 1
for i in self.txt:
out.append(self.text(i,1,self.height-n*15,"red"))
n+=1
out.append('<!--draw percent-->')
out.append(self.draw_percent())
out.append('<!--draw ohlc-->')
out.append(self.draw_ohlc())
out.append('<!--draw lines-->')
out.append(self.draw_lines())
# out.append("<br/>")
# out.append("<br/>")
return self.svg(''.join(out))
def draw_ohlc(self):
out = []
for i in xrange(len(self.data)):
one = self.data[i]
_hour = int(float(one.get('_time',0))/3600)%6
color = self.timecolor[_hour]
o = self.magic_y(one['o'],'ohlc')
h = self.magic_y(one['h'],'ohlc')
l = self.magic_y(one['l'],'ohlc')
c = self.magic_y(one['c'],'ohlc')
out.append(self.draw_time_color(i,color)+self.ohlc(i,o,h,l,c))
return ''.join(out)
def draw_percent(self):
out = []
_num = len(self.data)
style = "fill:none;stroke:rgb(%d,%d,%d);stroke-width:1"%self.grey2
for i in xrange(1,8):
out.append(self.line([(0,i*self.height/8),(_num*self.block,i*self.height/8.0)],style))
n = 0
for ks in self.k.keys():
k = self.k[ks]
_min = self.min[ks]
for j in xrange(1,8):
out.append(self.text('%s=%.1f'%(ks,k*(self.height-(8-j)*self.height/8.0)+_min),200+100*n,1+j*self.height/8.0,"grey"))
n+=1
return ''.join(out)
def draw_lines(self):
out = []
for k,v in self.out.items():
tag = k[-1]
color = k[-3]
style = "fill:none;stroke:rgb(%d,%d,%d);stroke-width:1"%color
min = self.min[tag]
k = self.k[tag]
data = []
i = 0
for o in v:
center = i*self.block+self.border+self.half
y = (o-min)/k
data.append((center,y))
i+=1
out.append(self.line(data,style))
return ''.join(out)
def ohlc(self,pos,o,h,l,c):
center = pos*self.block+self.border+self.half
x = pos*self.block+self.border
if c-o>0.1:
clr = self.up
rect = self.rect([(x,c),(x,o),(x+self.width,o),(x+self.width,c)],
"fill:rgb(%d,%d,%d);stroke:none;"%clr,"k")
elif o-c>0.1:
clr = self.dn
rect = self.rect([(x,o),(x,c),(x+self.width,c),(x+self.width,o)],
"fill:rgb(%d,%d,%d);stroke:none;"%clr,"k")
else:
clr = self.eq
rect = self.rect([(x,o),(x,o+1),(x+self.width,o+1),(x+self.width,o)],
"fill:rgb(%d,%d,%d);stroke:none;"%clr,"k")
line = self.line([(center,h),(center,l)],"fill:none;stroke:rgb(%d,%d,%d);stroke-width:1"%clr)
return line+rect
def draw_time_color(self,pos,color):
x = pos*self.block
return self.rect([
(x,self.height/2+2),
(x,self.height/2-2),
(x+self.block,self.height/2-2),
(x+self.block,self.height/2+2)
],"fill:rgb(%d,%d,%d);stroke:none"%color,"color")
def text(self,t,x,y,fill):
return '''<text x="%d" y="%.2f" fill="%s">%s</text>'''%(x,self.re_y(y),fill,t)
def line(self,lines,style):
ps = map(lambda x:'%d %.2f'%(x[0],self.re_y(x[1])),lines)
xy = ' '.join(ps)
return '''<polyline points="%s" style="%s"/>'''%(xy,style)
def rect(self,points,style,tag):
ps = map(lambda x:'%d %.2f'%(x[0],self.re_y(x[1])),points)
xy = ' L'.join(ps)
return '''<path d="M%s Z" style="%s" class="%s"/>'''%(xy,style,tag)
def make_k(self):
tmp = {}
for o in self.data:
self.max['ohlc'] = max(self.max.get('ohlc',o['h']+10),o['h'])
self.min['ohlc'] = min(self.min.get('ohlc',o['l']-10),o['l'])
self.k['ohlc'] = 0
for one in self.group:
k = one[0]
value = tmp.get(k,0.0)
_len = len(k)
if _len == 1 and k[0] in o:
value = o[k[0]]
elif _len == 1 and k[0][0]=='-' and k[0][1:] in o:
value = -1*o[k[0][1:]]
elif _len == 2:
value = k[1]
elif _len == 3 and k[0] in funcs:
_args = k[1]+(o,)
_kvs = dict(k[2])
value = funcs[k[0]](*_args,**_kvs)
tmp[k] = value
_l = self.out.get(one,[])
_l.append(value)
self.out[one] = _l
if one[-2]:
self.max[one[-1]] = max(self.max.get(one[-1], abs(value)), abs(value))
self.min[one[-1]] = min(self.min.get(one[-1],-1*abs(value)),-1*abs(value))
else:
self.max[one[-1]] = max(self.max.get(one[-1],value),value)
self.min[one[-1]] = min(self.min.get(one[-1],value),value)
self.k[one[-1]] = 0
for ks in self.k.keys():
self.max[ks]+=1
self.min[ks]-=1
self.k[ks] = max(0.001,(self.max[ks]-self.min[ks])/self.height)
def __init__(self,pos,datalist,textlist):
self.txt = textlist
self.len = len(datalist)
self.group = self.lines.get(pos,[])
self.data = datalist
self.max = {}
self.min = {}
self.k = {}
self.out = {}
def re_y(self,v):return self.height-v
def magic_y(self,v,tag):return (v-self.min[tag])/self.k[tag]
def svg(self,l):
return '''<svg width="%d" height="500" xmlns="http://www.w3.org/2000/svg" version="1.1"><g>'''%max(400,self.len*self.block)+l+'''</g></svg>'''
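# --- Hedged usage sketch (not part of the original file) ---
# Feeds the chart a couple of OHLC bars. The dict keys ('o', 'h', 'l', 'c',
# '_time') follow what make_k()/draw_ohlc() read above, and the extra 'point'
# key is what the "only" line group plots; all numbers are made up.
def _example_svg_chart():
    bars = [
        {'o': 10.0, 'h': 11.0, 'l': 9.5, 'c': 10.5, '_time': 3600, 'point': 10.2},
        {'o': 10.5, 'h': 12.0, 'l': 10.4, 'c': 11.8, '_time': 7200, 'point': 11.0},
    ]
    chart = SVG('only', bars, ['demo run'])
    return chart.to_html()   # an '<svg ...>...</svg>' markup string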
if __name__ == '__main__':
s = SVG('',[],'')
s.get_lines()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: language_level=3
"""
This module is for internal use only; no backwards-compatibility guarantees.
The classes in this file keep shared state, and organize metrics information.
Available classes:
- MetricKey - Internal key for a metric.
- MetricResult - Current status of a metric's updates/commits.
- _MetricsEnvironment - Keeps track of the MetricsContainer and other metrics
information for each execution worker thread.
- MetricsContainer - Holds the metrics of a single step and a single
unit-of-commit (bundle).
"""
# pytype: skip-file
import threading
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import FrozenSet
from typing import Optional
from typing import Type
from typing import Union
from typing import cast
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.cells import CounterCell
from apache_beam.metrics.cells import DistributionCell
from apache_beam.metrics.cells import GaugeCell
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.statesampler import get_current_tracker
if TYPE_CHECKING:
from apache_beam.metrics.cells import GaugeData
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.cells import MetricCell
from apache_beam.metrics.cells import MetricCellFactory
from apache_beam.metrics.metricbase import MetricName
from apache_beam.portability.api import metrics_pb2
class MetricKey(object):
"""Key used to identify instance of metric cell.
Metrics are internally keyed by the name of the step they're associated with,
the name and namespace (if it is a user defined metric) of the metric,
and any extra label metadata added by the runner specific metric collection
service.
"""
def __init__(self, step, metric, labels=None):
"""Initializes ``MetricKey``.
Args:
step: A string with the step this metric cell is part of.
metric: A ``MetricName`` namespace+name that identifies a metric.
labels: An arbitrary set of labels that also identifies the metric.
"""
self.step = step
self.metric = metric
self.labels = labels if labels else dict()
def __eq__(self, other):
return (
self.step == other.step and self.metric == other.metric and
self.labels == other.labels)
def __hash__(self):
return hash((self.step, self.metric, frozenset(self.labels)))
def __repr__(self):
return 'MetricKey(step={}, metric={}, labels={})'.format(
self.step, self.metric, self.labels)
class MetricResult(object):
"""Keeps track of the status of a metric within a single bundle.
It contains the physical and logical updates to the metric. Physical updates
are updates that have not necessarily been committed, but that have been made
during pipeline execution. Logical updates are updates that have been
committed.
Attributes:
key: A ``MetricKey`` that identifies the metric and bundle of this result.
committed: The committed updates of the metric. This attribute's type is
of metric type result (e.g. int, DistributionResult, GaugeResult).
attempted: The attempted (physical) updates of the metric. This attribute's
type is that of metric type result (e.g. int, DistributionResult,
GaugeResult).
"""
def __init__(self, key, committed, attempted):
"""Initializes ``MetricResult``.
Args:
key: A ``MetricKey`` object.
committed: Metric data that has been committed (e.g. logical updates)
attempted: Metric data that has been attempted (e.g. physical updates)
"""
self.key = key
self.committed = committed
self.attempted = attempted
def __eq__(self, other):
return (
self.key == other.key and self.committed == other.committed and
self.attempted == other.attempted)
def __hash__(self):
return hash((self.key, self.committed, self.attempted))
def __repr__(self):
return 'MetricResult(key={}, committed={}, attempted={})'.format(
self.key, str(self.committed), str(self.attempted))
def __str__(self):
return repr(self)
@property
def result(self):
"""Short-hand for falling back to attempted metrics if it seems that
committed was not populated (e.g. due to not being supported on a given
runner"""
return self.committed if self.committed else self.attempted
class _MetricsEnvironment(object):
"""Holds the MetricsContainer for every thread and other metric information.
This class is not meant to be instantiated directly; the single module-level
instance (MetricsEnvironment) is used to keep track of global state.
"""
def current_container(self):
"""Returns the current MetricsContainer."""
sampler = statesampler.get_current_tracker()
if sampler is None:
return None
return sampler.current_state().metrics_container
def process_wide_container(self):
"""Returns the MetricsContainer for process wide metrics, e.g. memory."""
return PROCESS_WIDE_METRICS_CONTAINER
MetricsEnvironment = _MetricsEnvironment()
class _TypedMetricName(object):
"""Like MetricName, but also stores the cell type of the metric."""
def __init__(
self,
cell_type, # type: Union[Type[MetricCell], MetricCellFactory]
metric_name # type: Union[str, MetricName]
):
# type: (...) -> None
self.cell_type = cell_type
self.metric_name = metric_name
if isinstance(metric_name, str):
self.fast_name = metric_name
else:
self.fast_name = metric_name.fast_name()
# Cached for speed, as this is used as a key for every counter update.
self._hash = hash((cell_type, self.fast_name))
def __eq__(self, other):
return self is other or (
self.cell_type == other.cell_type and self.fast_name == other.fast_name)
def __hash__(self):
return self._hash
def __str__(self):
return '%s %s' % (self.cell_type, self.metric_name)
def __reduce__(self):
return _TypedMetricName, (self.cell_type, self.metric_name)
_DEFAULT = None # type: Any
class MetricUpdater(object):
"""A callable that updates the metric as quickly as possible."""
def __init__(
self,
cell_type, # type: Union[Type[MetricCell], MetricCellFactory]
metric_name, # type: Union[str, MetricName]
default_value=None,
process_wide=False):
self.process_wide = process_wide
self.typed_metric_name = _TypedMetricName(cell_type, metric_name)
self.default_value = default_value
def __call__(self, value=_DEFAULT):
# type: (Any) -> None
if value is _DEFAULT:
if self.default_value is _DEFAULT:
raise ValueError(
'Missing value for update of %s' % self.typed_metric_name.fast_name)
value = self.default_value
if self.process_wide:
MetricsEnvironment.process_wide_container().get_metric_cell(
self.typed_metric_name).update(value)
else:
tracker = get_current_tracker()
if tracker is not None:
tracker.update_metric(self.typed_metric_name, value)
def __reduce__(self):
return MetricUpdater, (
self.typed_metric_name.cell_type,
self.typed_metric_name.metric_name,
self.default_value)
class MetricsContainer(object):
"""Holds the metrics of a single step and a single bundle.
Alternatively, it holds the metrics associated with the process/SDK harness,
e.g. memory usage.
"""
def __init__(self, step_name):
self.step_name = step_name
self.lock = threading.Lock()
self.metrics = dict() # type: Dict[_TypedMetricName, MetricCell]
def get_counter(self, metric_name):
# type: (MetricName) -> CounterCell
return cast(
CounterCell,
self.get_metric_cell(_TypedMetricName(CounterCell, metric_name)))
def get_distribution(self, metric_name):
# type: (MetricName) -> DistributionCell
return cast(
DistributionCell,
self.get_metric_cell(_TypedMetricName(DistributionCell, metric_name)))
def get_gauge(self, metric_name):
# type: (MetricName) -> GaugeCell
return cast(
GaugeCell,
self.get_metric_cell(_TypedMetricName(GaugeCell, metric_name)))
def get_metric_cell(self, typed_metric_name):
# type: (_TypedMetricName) -> MetricCell
cell = self.metrics.get(typed_metric_name, None)
if cell is None:
with self.lock:
cell = self.metrics[typed_metric_name] = typed_metric_name.cell_type()
return cell
def get_cumulative(self):
# type: () -> MetricUpdates
"""Return MetricUpdates with cumulative values of all metrics in container.
This returns all the cumulative values for all metrics.
"""
counters = {
MetricKey(self.step_name, k.metric_name): v.get_cumulative()
for k,
v in self.metrics.items() if k.cell_type == CounterCell
}
distributions = {
MetricKey(self.step_name, k.metric_name): v.get_cumulative()
for k,
v in self.metrics.items() if k.cell_type == DistributionCell
}
gauges = {
MetricKey(self.step_name, k.metric_name): v.get_cumulative()
for k,
v in self.metrics.items() if k.cell_type == GaugeCell
}
return MetricUpdates(counters, distributions, gauges)
def to_runner_api(self):
return [
cell.to_runner_api_user_metric(key.metric_name) for key,
cell in self.metrics.items()
]
def to_runner_api_monitoring_infos(self, transform_id):
# type: (str) -> Dict[FrozenSet, metrics_pb2.MonitoringInfo]
"""Returns a list of MonitoringInfos for the metrics in this container."""
with self.lock:
items = list(self.metrics.items())
all_metrics = [
cell.to_runner_api_monitoring_info(key.metric_name, transform_id)
for key,
cell in items
]
return {
monitoring_infos.to_key(mi): mi
for mi in all_metrics if mi is not None
}
def reset(self):
# type: () -> None
for metric in self.metrics.values():
metric.reset()
def __reduce__(self):
raise NotImplementedError
PROCESS_WIDE_METRICS_CONTAINER = MetricsContainer(None)
class MetricUpdates(object):
"""Contains updates for several metrics.
A metric update is an object containing information to update a metric.
For Distribution metrics, it is DistributionData, and for Counter metrics,
it's an int.
"""
def __init__(
self,
counters=None, # type: Optional[Dict[MetricKey, int]]
distributions=None, # type: Optional[Dict[MetricKey, DistributionData]]
gauges=None # type: Optional[Dict[MetricKey, GaugeData]]
):
# type: (...) -> None
"""Create a MetricUpdates object.
Args:
counters: Dictionary of MetricKey:MetricUpdate updates.
distributions: Dictionary of MetricKey:MetricUpdate objects.
gauges: Dictionary of MetricKey:MetricUpdate objects.
"""
self.counters = counters or {}
self.distributions = distributions or {}
self.gauges = gauges or {}
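# --- Hedged usage sketch (not part of the original module) ---
# Exercises MetricsContainer directly, outside a runner. MetricName is
# imported locally just for the example, and CounterCell.inc() is assumed to
# behave as in apache_beam.metrics.cells; step and metric names are made up.
def _example_metrics_container():
  from apache_beam.metrics.metricbase import MetricName
  container = MetricsContainer('example_step')
  counter = container.get_counter(MetricName('example.namespace', 'elements'))
  counter.inc()      # +1
  counter.inc(2)     # +2
  updates = container.get_cumulative()
  # updates.counters maps MetricKey('example_step', <MetricName>) -> 3
  return updates.counters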
|
|
# Copyright 2013, 2014 IBM Corp.
# mock module
import mock
import sys
import stubout
import unittest
sys.modules['powervc.common.client'] = mock.MagicMock()
# install the _() translation function used by the modules imported below
from oslo import i18n
i18n.install('cinder')
from powervc.common import config
from cinder import exception
from cinder import db
from powervc.volume.driver.service import PowerVCService
import six
class StorageProvider():
def __init__(self, i):
self.free_capacity_gb = (i + 1) * 5
self.total_capacity_gb = (i + 1) * 10
class VolumeMetadataWithPVCID():
def __init__(self, pvc_id="1234"):
self.key = "pvc:id"
self.value = pvc_id
class Volume():
def __init__(self, info):
self._info = info
self._add_details(info)
def setattr(self, key, val):
self.__setattr__(key, val)
def _add_details(self, info):
for (k, v) in six.iteritems(info):
try:
setattr(self, k, v)
except AttributeError:
# In this case we already defined the attribute on the class
pass
class PowerVCDriverTestCase(unittest.TestCase):
stubs = stubout.StubOutForTesting()
def setUp(self):
super(PowerVCDriverTestCase, self).setUp()
self.stubs.Set(PowerVCService, '_client', mock.MagicMock())
# we need to mock loading the config file before importing the PowerVCDriver class
config.parse_power_config = mock.MagicMock()
config.CONF.log_opt_values = mock.MagicMock()
from powervc.volume.driver.powervc import PowerVCDriver
self.powervc_cinder_driver = PowerVCDriver()
def test_create_volume_no_size_raise_exception(self):
self.assertRaises(exception.InvalidVolume,
self.powervc_cinder_driver.create_volume,
None)
def test_create_volume_succeed(self):
# local volume passed to driver
vol = {'id': 1234,
'size': 1}
volume = Volume(vol)
# fake volume after call creating volume from pvc
ret_vol_after_created = {'id': 4321,
'status': 'creating'}
ret_volume_after_created = Volume(ret_vol_after_created)
# fake volume after call get volume from pvc
ret_vol_get = {'id': 4321,
'status': 'available'}
ret_volume_get = Volume(ret_vol_get)
# mock create volume restAPI
PowerVCService._client.volumes.create = \
mock.MagicMock(return_value=ret_volume_after_created)
# mock get volume restAPI
PowerVCService._client.volumes.get = \
mock.MagicMock(return_value=ret_volume_get)
# mock db access operation
db.volume_update = mock.MagicMock(return_value=None)
dic = self.powervc_cinder_driver.create_volume(volume)
self.assertEqual({'status': 'available',
'metadata': {'pvc:id': 4321}},
dic, "return vol doesn't match")
def test_create_volume_failed(self):
# local volume passed to driver
vol = {'id': 1234,
'size': 1}
volume = Volume(vol)
# fake volume after call creating volume from pvc
ret_vol_after_created = {'id': 4321,
'status': 'creating'}
ret_volume_after_created = Volume(ret_vol_after_created)
# fake volume after call get volume from pvc
ret_vol_get = {'id': 4321,
'status': 'error'}
ret_volume_get = Volume(ret_vol_get)
# mock create volume restAPI
PowerVCService._client.volumes.create = \
mock.MagicMock(return_value=ret_volume_after_created)
# mock get volume restAPI
PowerVCService._client.volumes.get = \
mock.MagicMock(return_value=ret_volume_get)
# mock db access operation
db.volume_update = mock.MagicMock(return_value=None)
dic = self.powervc_cinder_driver.create_volume(volume)
self.assertEqual({'status': 'error',
'metadata': {'pvc:id': 4321}},
dic, "return vol doesn't match")
def test_create_volume_not_found(self):
# local volume passed to driver
vol = {'id': 1234,
'size': 1}
volume = Volume(vol)
# fake volume after call creating volume from pvc
ret_vol_after_created = {'id': 4321,
'status': 'creating'}
ret_volume_after_created = Volume(ret_vol_after_created)
# fake volume after call get volume from pvc
ret_vol_get = {'id': 4321,
'status': 'error'}
ret_volume_get = Volume(ret_vol_get)
# mock create volume restAPI
PowerVCService._client.volumes.create = \
mock.MagicMock(return_value=ret_volume_after_created)
# mock get volume restAPI
# first time raise an exception,
# second time return a error volume
PowerVCService._client.volumes.get = \
mock.MagicMock(side_effect=[exception.NotFound,
ret_volume_get])
# mock db access operation
db.volume_update = mock.MagicMock(return_value=None)
dic = self.powervc_cinder_driver.create_volume(volume)
self.assertEqual({'status': 'error',
'metadata': {'pvc:id': 4321}},
dic, "return vol doesn't match")
def test_delete_volume_success(self):
# fake volume which will be passed to driver service
vol_info = {'id': 1234,
'size': 1}
volume = Volume(vol_info)
setattr(volume, 'volume_metadata', [VolumeMetadataWithPVCID("1234")])
# fake existed volume
existed_vol_info = {"status": 'available', 'id': 1234}
existed_volume_get = Volume(existed_vol_info)
# fake volume after delete
after_delete_vol_info = {"status": '', 'id': 1234}
after_delete_volume_get = Volume(after_delete_vol_info)
# mock rest API
PowerVCService._client.volumes.get = \
mock.MagicMock(side_effect=[existed_volume_get,
after_delete_volume_get])
self.powervc_cinder_driver.delete_volume(volume)
def test_delete_volume_no_powervc_attribute_error(self):
# fake volume which will be passed to driver service
vol_info = {'id': 1234, 'size': 1}
volume = Volume(vol_info)
self.assertRaises(AttributeError,
self.powervc_cinder_driver.delete_volume,
volume)
def test_delete_volume_not_found_exception(self):
vol_info = {'id': 1234, 'size': 1}
volume = Volume(vol_info)
setattr(volume, 'volume_metadata', [VolumeMetadataWithPVCID("1234")])
PowerVCService._client.volumes.get = \
mock.MagicMock(side_effect=exception.NotFound())
self.assertRaises(exception.NotFound,
self.powervc_cinder_driver.delete_volume,
volume)
def test_get_volume_stats(self):
# fake a storage provider list
ret_sp = [StorageProvider(i) for i in range(10)]
# mock rest api
PowerVCService._client.storage_providers.list = \
mock.MagicMock(return_value=ret_sp)
# fake an expected return dictionary
expected_ret_dic = {}
expected_ret_dic["volume_backend_name"] = 'powervc'
expected_ret_dic["vendor_name"] = 'IBM'
expected_ret_dic["driver_version"] = 1.0
expected_ret_dic["storage_protocol"] = 'Openstack'
expected_ret_dic['total_capacity_gb'] = 550
expected_ret_dic['free_capacity_gb'] = 275
expected_ret_dic['reserved_percentage'] = 0
expected_ret_dic['QoS_support'] = False
ret_dic = self.powervc_cinder_driver.get_volume_stats(True)
self.assertEqual(expected_ret_dic,
ret_dic,
'return stats should be matched')
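# Hedged addition (not in the original file): a standard unittest entry point
# so this test module can also be executed directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()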
|
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper script to help run clang tools across Chromium code.
How to use this tool:
If you want to run the tool across all Chromium code:
run_tool.py <tool> <path/to/compiledb>
If you want to run the tool across just chrome/browser and content/browser:
run_tool.py <tool> <path/to/compiledb> chrome/browser content/browser
Please see https://code.google.com/p/chromium/wiki/ClangToolRefactoring for more
information, which documents the entire automated refactoring flow in Chromium.
Why use this tool:
The clang tool implementation doesn't take advantage of multiple cores, and if
it fails mysteriously in the middle, all the generated replacements will be
lost.
Unfortunately, if the work is simply sharded across multiple cores by running
multiple RefactoringTools, problems arise when they attempt to rewrite a file at
the same time. To work around that, clang tools that are run using this tool
should output edits to stdout in the following format:
==== BEGIN EDITS ====
r:<file path>:<offset>:<length>:<replacement text>
r:<file path>:<offset>:<length>:<replacement text>
...etc...
==== END EDITS ====
Any generated edits are applied once the clang tool has finished running
across Chromium, regardless of whether some instances failed or not.
"""
import collections
import functools
import multiprocessing
import os.path
import subprocess
import sys
Edit = collections.namedtuple(
'Edit', ('edit_type', 'offset', 'length', 'replacement'))
def _GetFilesFromGit(paths = None):
"""Gets the list of files in the git repository.
Args:
paths: Prefix filter for the returned paths. May contain multiple entries.
"""
args = ['git', 'ls-files']
if paths:
args.extend(paths)
command = subprocess.Popen(args, stdout=subprocess.PIPE)
output, _ = command.communicate()
return output.splitlines()
def _ExtractEditsFromStdout(build_directory, stdout):
"""Extracts generated list of edits from the tool's stdout.
The expected format is documented at the top of this file.
Args:
build_directory: Directory that contains the compile database. Used to
normalize the filenames.
stdout: The stdout from running the clang tool.
Returns:
A dictionary mapping filenames to the associated edits.
"""
lines = stdout.splitlines()
start_index = lines.index('==== BEGIN EDITS ====')
end_index = lines.index('==== END EDITS ====')
edits = collections.defaultdict(list)
for line in lines[start_index + 1:end_index]:
try:
edit_type, path, offset, length, replacement = line.split(':', 4)
# Normalize the file path emitted by the clang tool to be relative to the
# current working directory.
path = os.path.relpath(os.path.join(build_directory, path))
edits[path].append(Edit(edit_type, int(offset), int(length), replacement))
except ValueError:
print 'Unable to parse edit: %s' % line
return edits
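# --- Hedged sketch (not part of the original script) ---
# Shows what _ExtractEditsFromStdout() produces for a tiny, made-up tool run;
# the path, offsets and replacement text are illustrative only.
def _ExampleExtractEdits():
  fake_stdout = '\n'.join([
      'some tool chatter',
      '==== BEGIN EDITS ====',
      'r:foo/bar.cc:10:3:new_name',
      '==== END EDITS ====',
  ])
  edits = _ExtractEditsFromStdout('out/Debug', fake_stdout)
  # -> {relpath('out/Debug/foo/bar.cc'): [Edit('r', 10, 3, 'new_name')]}
  return edits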
def _ExecuteTool(toolname, build_directory, filename):
"""Executes the tool.
This is defined outside the class so it can be pickled for the multiprocessing
module.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filename: The file to run the tool over.
Returns:
A dictionary that must contain the key "status" and a boolean value
associated with it.
If status is True, then the generated edits are stored with the key "edits"
in the dictionary.
Otherwise, the filename and the output from stderr are associated with the
keys "filename" and "stderr" respectively.
"""
command = subprocess.Popen((toolname, '-p', build_directory, filename),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = command.communicate()
if command.returncode != 0:
return {'status': False, 'filename': filename, 'stderr': stderr}
else:
return {'status': True,
'edits': _ExtractEditsFromStdout(build_directory, stdout)}
class _CompilerDispatcher(object):
"""Multiprocessing controller for running clang tools in parallel."""
def __init__(self, toolname, build_directory, filenames):
"""Initializer method.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filenames: The files to run the tool over.
"""
self.__toolname = toolname
self.__build_directory = build_directory
self.__filenames = filenames
self.__success_count = 0
self.__failed_count = 0
self.__edits = collections.defaultdict(list)
@property
def edits(self):
return self.__edits
@property
def failed_count(self):
return self.__failed_count
def Run(self):
"""Does the grunt work."""
pool = multiprocessing.Pool()
result_iterator = pool.imap_unordered(
functools.partial(_ExecuteTool, self.__toolname,
self.__build_directory),
self.__filenames)
for result in result_iterator:
self.__ProcessResult(result)
sys.stdout.write('\n')
sys.stdout.flush()
def __ProcessResult(self, result):
"""Handles result processing.
Args:
result: The result dictionary returned by _ExecuteTool.
"""
if result['status']:
self.__success_count += 1
for k, v in result['edits'].iteritems():
self.__edits[k].extend(v)
else:
self.__failed_count += 1
sys.stdout.write('\nFailed to process %s\n' % result['filename'])
sys.stdout.write(result['stderr'])
sys.stdout.write('\n')
percentage = (
float(self.__success_count + self.__failed_count) /
len(self.__filenames)) * 100
sys.stdout.write('Succeeded: %d, Failed: %d [%.2f%%]\r' % (
self.__success_count, self.__failed_count, percentage))
sys.stdout.flush()
def _ApplyEdits(edits, clang_format_diff_path):
"""Apply the generated edits.
Args:
edits: A dict mapping filenames to Edit instances that apply to that file.
clang_format_diff_path: Path to the clang-format-diff.py helper to help
automatically reformat diffs to avoid style violations. Pass None if the
clang-format step should be skipped.
"""
edit_count = 0
for k, v in edits.iteritems():
# Sort the edits and iterate through them in reverse order. Sorting allows
# duplicate edits to be quickly skipped, while reversing means that
# subsequent edits don't need to have their offsets updated with each edit
# applied.
v.sort()
last_edit = None
with open(k, 'rb+') as f:
contents = bytearray(f.read())
for edit in reversed(v):
if edit == last_edit:
continue
last_edit = edit
contents[edit.offset:edit.offset + edit.length] = edit.replacement
if not edit.replacement:
_ExtendDeletionIfElementIsInList(contents, edit.offset)
edit_count += 1
f.seek(0)
f.truncate()
f.write(contents)
if clang_format_diff_path:
if subprocess.call('git diff -U0 %s | python %s -style=Chromium' % (
k, clang_format_diff_path), shell=True) != 0:
print 'clang-format failed for %s' % k
print 'Applied %d edits to %d files' % (edit_count, len(edits))
_WHITESPACE_BYTES = frozenset((ord('\t'), ord('\n'), ord('\r'), ord(' ')))
def _ExtendDeletionIfElementIsInList(contents, offset):
"""Extends the range of a deletion if the deleted element was part of a list.
This rewriter helper makes it easy for refactoring tools to remove elements
from a list. Even if a matcher callback knows that it is removing an element
from a list, it may not have enough information to accurately remove the list
element; for example, another matcher callback may end up removing an adjacent
list element, or all the list elements may end up being removed.
With this helper, refactoring tools can simply remove the list element and not
worry about having to include the comma in the replacement.
Args:
contents: A bytearray with the deletion already applied.
offset: The offset in the bytearray where the deleted range used to be.
"""
char_before = char_after = None
left_trim_count = 0
for byte in reversed(contents[:offset]):
left_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte in (ord(','), ord(':'), ord('('), ord('{')):
char_before = chr(byte)
break
right_trim_count = 0
for byte in contents[offset:]:
right_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte == ord(','):
char_after = chr(byte)
break
if char_before:
if char_after:
del contents[offset:offset + right_trim_count]
elif char_before in (',', ':'):
del contents[offset - left_trim_count:offset]
def main(argv):
if len(argv) < 2:
print 'Usage: run_tool.py <clang tool> <compile DB> <path 1> <path 2> ...'
print ' <clang tool> is the clang tool that should be run.'
print ' <compile db> is the directory that contains the compile database'
print ' <path 1> <path2> ... can be used to filter what files are edited'
return 1
clang_format_diff_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../../third_party/llvm/tools/clang/tools/clang-format',
'clang-format-diff.py')
# TODO(dcheng): Allow this to be controlled with a flag as well.
if not os.path.isfile(clang_format_diff_path):
clang_format_diff_path = None
filenames = frozenset(_GetFilesFromGit(argv[2:]))
# Filter out files that aren't C/C++/Obj-C/Obj-C++.
extensions = frozenset(('.c', '.cc', '.m', '.mm'))
dispatcher = _CompilerDispatcher(argv[0], argv[1],
[f for f in filenames
if os.path.splitext(f)[1] in extensions])
dispatcher.Run()
# Filter out edits to files that aren't in the git repository, since it's not
# useful to modify files that aren't under source control--typically, these
# are generated files or files in a git submodule that's not part of Chromium.
_ApplyEdits({k : v for k, v in dispatcher.edits.iteritems()
if k in filenames},
clang_format_diff_path)
if dispatcher.failed_count != 0:
return 2
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|