repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (string, 991 classes) | size (string, 4-7 chars) | content (string, 666 chars-1M) | license (string, 15 classes)
---|---|---|---|---|---
bufferx/tornado | tornado/test/web_test.py | 1 | 68872 | apache-2.0

from __future__ import absolute_import, division, print_function, with_statement
from tornado import gen
from tornado.escape import json_decode, utf8, to_unicode, recursive_unicode, native_str, to_basestring
from tornado.httputil import format_timestamp
from tornado.iostream import IOStream
from tornado.log import app_log, gen_log
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.template import DictLoader
from tornado.testing import AsyncHTTPTestCase, ExpectLog
from tornado.test.util import unittest
from tornado.util import u, bytes_type, ObjectDict, unicode_type
from tornado.web import RequestHandler, authenticated, Application, asynchronous, url, HTTPError, StaticFileHandler, _create_signature, create_signed_value, ErrorHandler, UIModule, MissingArgumentError
import binascii
import datetime
import email.utils
import logging
import os
import re
import socket
import sys
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
wsgi_safe_tests = []
relpath = lambda *a: os.path.join(os.path.dirname(__file__), *a)
def wsgi_safe(cls):
wsgi_safe_tests.append(cls)
return cls
class WebTestCase(AsyncHTTPTestCase):
"""Base class for web tests that also supports WSGI mode.
Override get_handlers and get_app_kwargs instead of get_app.
Decorate with @wsgi_safe (which appends to wsgi_safe_tests) to have it run in wsgi_test as well.
"""
def get_app(self):
self.app = Application(self.get_handlers(), **self.get_app_kwargs())
return self.app
def get_handlers(self):
raise NotImplementedError()
def get_app_kwargs(self):
return {}
class SimpleHandlerTestCase(WebTestCase):
"""Simplified base class for tests that work with a single handler class.
To use, define a nested class named ``Handler``.
"""
def get_handlers(self):
return [('/', self.Handler)]
class HelloHandler(RequestHandler):
def get(self):
self.write('hello')
class CookieTestRequestHandler(RequestHandler):
# stub out enough methods to make the secure_cookie functions work
def __init__(self):
# don't call super.__init__
self._cookies = {}
self.application = ObjectDict(settings=dict(cookie_secret='0123456789'))
def get_cookie(self, name):
return self._cookies.get(name)
def set_cookie(self, name, value, expires_days=None):
self._cookies[name] = value
class SecureCookieTest(unittest.TestCase):
def test_round_trip(self):
handler = CookieTestRequestHandler()
handler.set_secure_cookie('foo', b'bar')
self.assertEqual(handler.get_secure_cookie('foo'), b'bar')
def test_cookie_tampering_future_timestamp(self):
handler = CookieTestRequestHandler()
# this string base64-encodes to '12345678'
handler.set_secure_cookie('foo', binascii.a2b_hex(b'd76df8e7aefc'))
cookie = handler._cookies['foo']
match = re.match(br'12345678\|([0-9]+)\|([0-9a-f]+)', cookie)
self.assertTrue(match)
timestamp = match.group(1)
sig = match.group(2)
self.assertEqual(
_create_signature(handler.application.settings["cookie_secret"],
'foo', '12345678', timestamp),
sig)
# shifting digits from payload to timestamp doesn't alter signature
# (this is not desirable behavior, just confirming that that's how it
# works)
self.assertEqual(
_create_signature(handler.application.settings["cookie_secret"],
'foo', '1234', b'5678' + timestamp),
sig)
# tamper with the cookie
handler._cookies['foo'] = utf8('1234|5678%s|%s' % (
to_basestring(timestamp), to_basestring(sig)))
# it gets rejected
with ExpectLog(gen_log, "Cookie timestamp in future"):
self.assertTrue(handler.get_secure_cookie('foo') is None)
def test_arbitrary_bytes(self):
# Secure cookies accept arbitrary data (which is base64 encoded).
# Note that normal cookies accept only a subset of ascii.
handler = CookieTestRequestHandler()
handler.set_secure_cookie('foo', b'\xe9')
self.assertEqual(handler.get_secure_cookie('foo'), b'\xe9')
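# A minimal sketch of the version-1 signed-value layout the tests above rely
# on, assuming (as the assertions imply) a base64-encoded payload plus an
# HMAC-SHA1 hex digest over the concatenated name, encoded value, and
# timestamp. Illustrative only -- the real implementation is
# create_signed_value/_create_signature in tornado.web.
import base64
import hashlib
import hmac
import time


def _sketch_create_signed_value(secret, name, value):
    # Layout: b64(value)|timestamp|hexdigest, signed over the parts back to back.
    timestamp = utf8(str(int(time.time())))
    encoded = base64.b64encode(value)
    mac = hmac.new(utf8(secret), digestmod=hashlib.sha1)
    for part in (utf8(name), encoded, timestamp):
        mac.update(part)
    return b"|".join([encoded, timestamp, utf8(mac.hexdigest())])
# Because the digest covers the parts with no separator between them, moving
# digits across the value/timestamp "|" boundary (as
# test_cookie_tampering_future_timestamp does) leaves the signature unchanged.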
class CookieTest(WebTestCase):
def get_handlers(self):
class SetCookieHandler(RequestHandler):
def get(self):
# Try setting cookies with different argument types
# to ensure that everything gets encoded correctly
self.set_cookie("str", "asdf")
self.set_cookie("unicode", u("qwer"))
self.set_cookie("bytes", b"zxcv")
class GetCookieHandler(RequestHandler):
def get(self):
self.write(self.get_cookie("foo", "default"))
class SetCookieDomainHandler(RequestHandler):
def get(self):
# unicode domain and path arguments shouldn't break things
# either (see bug #285)
self.set_cookie("unicode_args", "blah", domain=u("foo.com"),
path=u("/foo"))
class SetCookieSpecialCharHandler(RequestHandler):
def get(self):
self.set_cookie("equals", "a=b")
self.set_cookie("semicolon", "a;b")
self.set_cookie("quote", 'a"b')
class SetCookieOverwriteHandler(RequestHandler):
def get(self):
self.set_cookie("a", "b", domain="example.com")
self.set_cookie("c", "d", domain="example.com")
# A second call with the same name clobbers the first.
# Attributes from the first call are not carried over.
self.set_cookie("a", "e")
return [("/set", SetCookieHandler),
("/get", GetCookieHandler),
("/set_domain", SetCookieDomainHandler),
("/special_char", SetCookieSpecialCharHandler),
("/set_overwrite", SetCookieOverwriteHandler),
]
def test_set_cookie(self):
response = self.fetch("/set")
self.assertEqual(sorted(response.headers.get_list("Set-Cookie")),
["bytes=zxcv; Path=/",
"str=asdf; Path=/",
"unicode=qwer; Path=/",
])
def test_get_cookie(self):
response = self.fetch("/get", headers={"Cookie": "foo=bar"})
self.assertEqual(response.body, b"bar")
response = self.fetch("/get", headers={"Cookie": 'foo="bar"'})
self.assertEqual(response.body, b"bar")
response = self.fetch("/get", headers={"Cookie": "/=exception;"})
self.assertEqual(response.body, b"default")
def test_set_cookie_domain(self):
response = self.fetch("/set_domain")
self.assertEqual(response.headers.get_list("Set-Cookie"),
["unicode_args=blah; Domain=foo.com; Path=/foo"])
def test_cookie_special_char(self):
response = self.fetch("/special_char")
headers = sorted(response.headers.get_list("Set-Cookie"))
self.assertEqual(len(headers), 3)
self.assertEqual(headers[0], 'equals="a=b"; Path=/')
self.assertEqual(headers[1], 'quote="a\\"b"; Path=/')
# python 2.7 octal-escapes the semicolon; older versions leave it alone
self.assertTrue(headers[2] in ('semicolon="a;b"; Path=/',
'semicolon="a\\073b"; Path=/'),
headers[2])
data = [('foo=a=b', 'a=b'),
('foo="a=b"', 'a=b'),
('foo="a;b"', 'a;b'),
# ('foo=a\\073b', 'a;b'), # even encoded, ";" is a delimiter
('foo="a\\073b"', 'a;b'),
('foo="a\\"b"', 'a"b'),
]
for header, expected in data:
logging.debug("trying %r", header)
response = self.fetch("/get", headers={"Cookie": header})
self.assertEqual(response.body, utf8(expected))
def test_set_cookie_overwrite(self):
response = self.fetch("/set_overwrite")
headers = response.headers.get_list("Set-Cookie")
self.assertEqual(sorted(headers),
["a=e; Path=/", "c=d; Domain=example.com; Path=/"])
class AuthRedirectRequestHandler(RequestHandler):
def initialize(self, login_url):
self.login_url = login_url
def get_login_url(self):
return self.login_url
@authenticated
def get(self):
# we'll never actually get here because the test doesn't follow redirects
self.send_error(500)
class AuthRedirectTest(WebTestCase):
def get_handlers(self):
return [('/relative', AuthRedirectRequestHandler,
dict(login_url='/login')),
('/absolute', AuthRedirectRequestHandler,
dict(login_url='http://example.com/login'))]
def test_relative_auth_redirect(self):
self.http_client.fetch(self.get_url('/relative'), self.stop,
follow_redirects=False)
response = self.wait()
self.assertEqual(response.code, 302)
self.assertEqual(response.headers['Location'], '/login?next=%2Frelative')
def test_absolute_auth_redirect(self):
self.http_client.fetch(self.get_url('/absolute'), self.stop,
follow_redirects=False)
response = self.wait()
self.assertEqual(response.code, 302)
self.assertTrue(re.match(
r'http://example.com/login\?next=http%3A%2F%2Flocalhost%3A[0-9]+%2Fabsolute',
response.headers['Location']), response.headers['Location'])
class ConnectionCloseHandler(RequestHandler):
def initialize(self, test):
self.test = test
@asynchronous
def get(self):
self.test.on_handler_waiting()
def on_connection_close(self):
self.test.on_connection_close()
class ConnectionCloseTest(WebTestCase):
def get_handlers(self):
return [('/', ConnectionCloseHandler, dict(test=self))]
def test_connection_close(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.connect(("localhost", self.get_http_port()))
self.stream = IOStream(s, io_loop=self.io_loop)
self.stream.write(b"GET / HTTP/1.0\r\n\r\n")
self.wait()
def on_handler_waiting(self):
logging.debug('handler waiting')
self.stream.close()
def on_connection_close(self):
logging.debug('connection closed')
self.stop()
class EchoHandler(RequestHandler):
def get(self, *path_args):
# Type checks: web.py interfaces convert argument values to
# unicode strings (by default, but see also decode_argument).
# In httpserver.py (i.e. self.request.arguments), they're left
# as bytes. Keys are always native strings.
for key in self.request.arguments:
if type(key) != str:
raise Exception("incorrect type for key: %r" % type(key))
for value in self.request.arguments[key]:
if type(value) != bytes_type:
raise Exception("incorrect type for value: %r" %
type(value))
for value in self.get_arguments(key):
if type(value) != unicode_type:
raise Exception("incorrect type for value: %r" %
type(value))
for arg in path_args:
if type(arg) != unicode_type:
raise Exception("incorrect type for path arg: %r" % type(arg))
self.write(dict(path=self.request.path,
path_args=path_args,
args=recursive_unicode(self.request.arguments)))
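# To make the conventions checked above concrete: a request for "/group/x?a=b"
# handled by EchoHandler observes (on both Python 2 and 3):
#   self.request.arguments == {'a': [b'b']}   # keys native str, values bytes
#   self.get_arguments('a') == [u'b']         # decoded to unicode
#   path_args == (u'x',)                      # path captures are unicode too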
class RequestEncodingTest(WebTestCase):
def get_handlers(self):
return [("/group/(.*)", EchoHandler),
("/slashes/([^/]*)/([^/]*)", EchoHandler),
]
def fetch_json(self, path):
return json_decode(self.fetch(path).body)
def test_group_question_mark(self):
# Ensure that url-encoded question marks are handled properly
self.assertEqual(self.fetch_json('/group/%3F'),
dict(path='/group/%3F', path_args=['?'], args={}))
self.assertEqual(self.fetch_json('/group/%3F?%3F=%3F'),
dict(path='/group/%3F', path_args=['?'], args={'?': ['?']}))
def test_group_encoding(self):
# Path components and query arguments should be decoded the same way
self.assertEqual(self.fetch_json('/group/%C3%A9?arg=%C3%A9'),
{u("path"): u("/group/%C3%A9"),
u("path_args"): [u("\u00e9")],
u("args"): {u("arg"): [u("\u00e9")]}})
def test_slashes(self):
# Slashes may be escaped to appear as a single "directory" in the path,
# but they are then unescaped when passed to the get() method.
self.assertEqual(self.fetch_json('/slashes/foo/bar'),
dict(path="/slashes/foo/bar",
path_args=["foo", "bar"],
args={}))
self.assertEqual(self.fetch_json('/slashes/a%2Fb/c%2Fd'),
dict(path="/slashes/a%2Fb/c%2Fd",
path_args=["a/b", "c/d"],
args={}))
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
self.check_type('status', self.get_status(), int)
# get_argument is an exception from the general rule of using
# type str for non-body data mainly for historical reasons.
self.check_type('argument', self.get_argument('foo'), unicode_type)
self.check_type('cookie_key', list(self.cookies.keys())[0], str)
self.check_type('cookie_value', list(self.cookies.values())[0].value, str)
# Secure cookies return bytes because they can contain arbitrary
# data, but regular cookies are native strings.
if list(self.cookies.keys()) != ['asdf']:
raise Exception("unexpected values for cookie keys: %r" %
self.cookies.keys())
self.check_type('get_secure_cookie', self.get_secure_cookie('asdf'), bytes_type)
self.check_type('get_cookie', self.get_cookie('asdf'), str)
self.check_type('xsrf_token', self.xsrf_token, bytes_type)
self.check_type('xsrf_form_html', self.xsrf_form_html(), str)
self.check_type('reverse_url', self.reverse_url('typecheck', 'foo'), str)
self.check_type('request_summary', self._request_summary(), str)
def get(self, path_component):
# path_component uses type unicode instead of str for consistency
# with get_argument()
self.check_type('path_component', path_component, unicode_type)
self.write(self.errors)
def post(self, path_component):
self.check_type('path_component', path_component, unicode_type)
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class DecodeArgHandler(RequestHandler):
def decode_argument(self, value, name=None):
if type(value) != bytes_type:
raise Exception("unexpected type for value: %r" % type(value))
# use self.request.arguments directly to avoid recursion
if 'encoding' in self.request.arguments:
return value.decode(to_unicode(self.request.arguments['encoding'][0]))
else:
return value
def get(self, arg):
def describe(s):
if type(s) == bytes_type:
return ["bytes", native_str(binascii.b2a_hex(s))]
elif type(s) == unicode_type:
return ["unicode", s]
raise Exception("unknown type")
self.write({'path': describe(arg),
'query': describe(self.get_argument("foo")),
})
class LinkifyHandler(RequestHandler):
def get(self):
self.render("linkify.html", message="http://example.com")
class UIModuleResourceHandler(RequestHandler):
def get(self):
self.render("page.html", entries=[1, 2])
class OptionalPathHandler(RequestHandler):
def get(self, path):
self.write({"path": path})
class FlowControlHandler(RequestHandler):
# These writes are too small to demonstrate real flow control,
# but at least it shows that the callbacks get run.
@asynchronous
def get(self):
self.write("1")
self.flush(callback=self.step2)
def step2(self):
self.write("2")
self.flush(callback=self.step3)
def step3(self):
self.write("3")
self.finish()
class MultiHeaderHandler(RequestHandler):
def get(self):
self.set_header("x-overwrite", "1")
self.set_header("X-Overwrite", 2)
self.add_header("x-multi", 3)
self.add_header("X-Multi", "4")
class RedirectHandler(RequestHandler):
def get(self):
if self.get_argument('permanent', None) is not None:
self.redirect('/', permanent=int(self.get_argument('permanent')))
elif self.get_argument('status', None) is not None:
self.redirect('/', status=int(self.get_argument('status')))
else:
raise Exception("didn't get permanent or status arguments")
class EmptyFlushCallbackHandler(RequestHandler):
@gen.engine
@asynchronous
def get(self):
# Ensure that the flush callback is run whether or not there
# was any output.
yield gen.Task(self.flush) # "empty" flush, but writes headers
yield gen.Task(self.flush) # empty flush
self.write("o")
yield gen.Task(self.flush) # flushes the "o"
yield gen.Task(self.flush) # empty flush
self.finish("k")
class HeaderInjectionHandler(RequestHandler):
def get(self):
try:
self.set_header("X-Foo", "foo\r\nX-Bar: baz")
raise Exception("Didn't get expected exception")
except ValueError as e:
if "Unsafe header value" in str(e):
self.finish(b"ok")
else:
raise
class GetArgumentHandler(RequestHandler):
def prepare(self):
if self.get_argument('source', None) == 'query':
method = self.get_query_argument
elif self.get_argument('source', None) == 'body':
method = self.get_body_argument
else:
method = self.get_argument
self.finish(method("foo", "default"))
class GetArgumentsHandler(RequestHandler):
def prepare(self):
self.finish(dict(default=self.get_arguments("foo"),
query=self.get_query_arguments("foo"),
body=self.get_body_arguments("foo")))
# This test is shared with wsgi_test.py
@wsgi_safe
class WSGISafeWebTest(WebTestCase):
COOKIE_SECRET = "WebTest.COOKIE_SECRET"
def get_app_kwargs(self):
loader = DictLoader({
"linkify.html": "{% module linkify(message) %}",
"page.html": """\
<html><head></head><body>
{% for e in entries %}
{% module Template("entry.html", entry=e) %}
{% end %}
</body></html>""",
"entry.html": """\
{{ set_resources(embedded_css=".entry { margin-bottom: 1em; }", embedded_javascript="js_embed()", css_files=["/base.css", "/foo.css"], javascript_files="/common.js", html_head="<meta>", html_body='<script src="/analytics.js"/>') }}
<div class="entry">...</div>""",
})
return dict(template_loader=loader,
autoescape="xhtml_escape",
cookie_secret=self.COOKIE_SECRET)
def tearDown(self):
super(WSGISafeWebTest, self).tearDown()
RequestHandler._template_loaders.clear()
def get_handlers(self):
urls = [
url("/typecheck/(.*)", TypeCheckHandler, name='typecheck'),
url("/decode_arg/(.*)", DecodeArgHandler, name='decode_arg'),
url("/decode_arg_kw/(?P<arg>.*)", DecodeArgHandler),
url("/linkify", LinkifyHandler),
url("/uimodule_resources", UIModuleResourceHandler),
url("/optional_path/(.+)?", OptionalPathHandler),
url("/multi_header", MultiHeaderHandler),
url("/redirect", RedirectHandler),
url("/header_injection", HeaderInjectionHandler),
url("/get_argument", GetArgumentHandler),
url("/get_arguments", GetArgumentsHandler),
]
return urls
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
def test_types(self):
cookie_value = to_unicode(create_signed_value(self.COOKIE_SECRET,
"asdf", "qwer"))
response = self.fetch("/typecheck/asdf?foo=bar",
headers={"Cookie": "asdf=" + cookie_value})
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck/asdf?foo=bar", method="POST",
headers={"Cookie": "asdf=" + cookie_value},
body="foo=bar")
def test_decode_argument(self):
# These urls all decode to the same thing
urls = ["/decode_arg/%C3%A9?foo=%C3%A9&encoding=utf-8",
"/decode_arg/%E9?foo=%E9&encoding=latin1",
"/decode_arg_kw/%E9?foo=%E9&encoding=latin1",
]
for url in urls:
response = self.fetch(url)
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u('path'): [u('unicode'), u('\u00e9')],
u('query'): [u('unicode'), u('\u00e9')],
})
response = self.fetch("/decode_arg/%C3%A9?foo=%C3%A9")
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u('path'): [u('bytes'), u('c3a9')],
u('query'): [u('bytes'), u('c3a9')],
})
def test_decode_argument_plus(self):
# These urls are all equivalent.
urls = ["/decode_arg/1%20%2B%201?foo=1%20%2B%201&encoding=utf-8",
"/decode_arg/1%20+%201?foo=1+%2B+1&encoding=utf-8"]
for url in urls:
response = self.fetch(url)
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {u('path'): [u('unicode'), u('1 + 1')],
u('query'): [u('unicode'), u('1 + 1')],
})
def test_reverse_url(self):
self.assertEqual(self.app.reverse_url('decode_arg', 'foo'),
'/decode_arg/foo')
self.assertEqual(self.app.reverse_url('decode_arg', 42),
'/decode_arg/42')
self.assertEqual(self.app.reverse_url('decode_arg', b'\xe9'),
'/decode_arg/%E9')
self.assertEqual(self.app.reverse_url('decode_arg', u('\u00e9')),
'/decode_arg/%C3%A9')
self.assertEqual(self.app.reverse_url('decode_arg', '1 + 1'),
'/decode_arg/1%20%2B%201')
def test_uimodule_unescaped(self):
response = self.fetch("/linkify")
self.assertEqual(response.body,
b"<a href=\"http://example.com\">http://example.com</a>")
def test_uimodule_resources(self):
response = self.fetch("/uimodule_resources")
self.assertEqual(response.body, b"""\
<html><head><link href="/base.css" type="text/css" rel="stylesheet"/><link href="/foo.css" type="text/css" rel="stylesheet"/>
<style type="text/css">
.entry { margin-bottom: 1em; }
</style>
<meta>
</head><body>
<div class="entry">...</div>
<div class="entry">...</div>
<script src="/common.js" type="text/javascript"></script>
<script type="text/javascript">
//<![CDATA[
js_embed()
//]]>
</script>
<script src="/analytics.js"/>
</body></html>""")
def test_optional_path(self):
self.assertEqual(self.fetch_json("/optional_path/foo"),
{u("path"): u("foo")})
self.assertEqual(self.fetch_json("/optional_path/"),
{u("path"): None})
def test_multi_header(self):
response = self.fetch("/multi_header")
self.assertEqual(response.headers["x-overwrite"], "2")
self.assertEqual(response.headers.get_list("x-multi"), ["3", "4"])
def test_redirect(self):
response = self.fetch("/redirect?permanent=1", follow_redirects=False)
self.assertEqual(response.code, 301)
response = self.fetch("/redirect?permanent=0", follow_redirects=False)
self.assertEqual(response.code, 302)
response = self.fetch("/redirect?status=307", follow_redirects=False)
self.assertEqual(response.code, 307)
def test_header_injection(self):
response = self.fetch("/header_injection")
self.assertEqual(response.body, b"ok")
def test_get_argument(self):
response = self.fetch("/get_argument?foo=bar")
self.assertEqual(response.body, b"bar")
response = self.fetch("/get_argument?foo=")
self.assertEqual(response.body, b"")
response = self.fetch("/get_argument")
self.assertEqual(response.body, b"default")
# Test merging of query and body arguments.
# In singular form, body arguments take precedence over query arguments.
body = urllib_parse.urlencode(dict(foo="hello"))
response = self.fetch("/get_argument?foo=bar", method="POST", body=body)
self.assertEqual(response.body, b"hello")
# In plural methods they are merged.
response = self.fetch("/get_arguments?foo=bar",
method="POST", body=body)
self.assertEqual(json_decode(response.body),
dict(default=['bar', 'hello'],
query=['bar'],
body=['hello']))
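# Spelled out, the precedence rule the assertions above rely on is: for
# POST /get_argument?foo=bar with body "foo=hello",
#   get_argument("foo")       -> "hello"           (singular: body wins)
#   get_arguments("foo")      -> ["bar", "hello"]  (plural: query then body)
#   get_query_argument("foo") -> "bar"
#   get_body_argument("foo")  -> "hello"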
def test_get_query_arguments(self):
# send as a post so we can ensure the separation between query
# string and body arguments.
body = urllib_parse.urlencode(dict(foo="hello"))
response = self.fetch("/get_argument?source=query&foo=bar",
method="POST", body=body)
self.assertEqual(response.body, b"bar")
response = self.fetch("/get_argument?source=query&foo=",
method="POST", body=body)
self.assertEqual(response.body, b"")
response = self.fetch("/get_argument?source=query",
method="POST", body=body)
self.assertEqual(response.body, b"default")
def test_get_body_arguments(self):
body = urllib_parse.urlencode(dict(foo="bar"))
response = self.fetch("/get_argument?source=body&foo=hello",
method="POST", body=body)
self.assertEqual(response.body, b"bar")
body = urllib_parse.urlencode(dict(foo=""))
response = self.fetch("/get_argument?source=body&foo=hello",
method="POST", body=body)
self.assertEqual(response.body, b"")
body = urllib_parse.urlencode(dict())
response = self.fetch("/get_argument?source=body&foo=hello",
method="POST", body=body)
self.assertEqual(response.body, b"default")
def test_no_gzip(self):
response = self.fetch('/get_argument')
self.assertNotIn('Accept-Encoding', response.headers.get('Vary', ''))
self.assertNotIn('gzip', response.headers.get('Content-Encoding', ''))
class NonWSGIWebTests(WebTestCase):
def get_handlers(self):
return [("/flow_control", FlowControlHandler),
("/empty_flush", EmptyFlushCallbackHandler),
]
def test_flow_control(self):
self.assertEqual(self.fetch("/flow_control").body, b"123")
def test_empty_flush(self):
response = self.fetch("/empty_flush")
self.assertEqual(response.body, b"ok")
@wsgi_safe
class ErrorResponseTest(WebTestCase):
def get_handlers(self):
class DefaultHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
raise HTTPError(int(self.get_argument("status")))
1 / 0
class WriteErrorHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
self.send_error(int(self.get_argument("status")))
else:
1 / 0
def write_error(self, status_code, **kwargs):
self.set_header("Content-Type", "text/plain")
if "exc_info" in kwargs:
self.write("Exception: %s" % kwargs["exc_info"][0].__name__)
else:
self.write("Status: %d" % status_code)
class GetErrorHtmlHandler(RequestHandler):
def get(self):
if self.get_argument("status", None):
self.send_error(int(self.get_argument("status")))
else:
1 / 0
def get_error_html(self, status_code, **kwargs):
self.set_header("Content-Type", "text/plain")
if "exception" in kwargs:
self.write("Exception: %s" % sys.exc_info()[0].__name__)
else:
self.write("Status: %d" % status_code)
class FailedWriteErrorHandler(RequestHandler):
def get(self):
1 / 0
def write_error(self, status_code, **kwargs):
raise Exception("exception in write_error")
return [url("/default", DefaultHandler),
url("/write_error", WriteErrorHandler),
url("/get_error_html", GetErrorHtmlHandler),
url("/failed_write_error", FailedWriteErrorHandler),
]
def test_default(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/default")
self.assertEqual(response.code, 500)
self.assertTrue(b"500: Internal Server Error" in response.body)
response = self.fetch("/default?status=503")
self.assertEqual(response.code, 503)
self.assertTrue(b"503: Service Unavailable" in response.body)
def test_write_error(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b"Exception: ZeroDivisionError", response.body)
response = self.fetch("/write_error?status=503")
self.assertEqual(response.code, 503)
self.assertEqual(b"Status: 503", response.body)
def test_get_error_html(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/get_error_html")
self.assertEqual(response.code, 500)
self.assertEqual(b"Exception: ZeroDivisionError", response.body)
response = self.fetch("/get_error_html?status=503")
self.assertEqual(response.code, 503)
self.assertEqual(b"Status: 503", response.body)
def test_failed_write_error(self):
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/failed_write_error")
self.assertEqual(response.code, 500)
self.assertEqual(b"", response.body)
@wsgi_safe
class StaticFileTest(WebTestCase):
# The expected MD5 hash of robots.txt, used in tests that call
# StaticFileHandler.get_version
robots_txt_hash = b"f71d20196d4caf35b6a670db8c70b03d"
static_dir = os.path.join(os.path.dirname(__file__), 'static')
def get_handlers(self):
class StaticUrlHandler(RequestHandler):
def get(self, path):
with_v = int(self.get_argument('include_version', 1))
self.write(self.static_url(path, include_version=with_v))
class AbsoluteStaticUrlHandler(StaticUrlHandler):
include_host = True
class OverrideStaticUrlHandler(RequestHandler):
def get(self, path):
# the query argument arrives as "0"/"1"; bool("0") would be True
do_include = bool(int(self.get_argument("include_host")))
self.include_host = not do_include
regular_url = self.static_url(path)
override_url = self.static_url(path, include_host=do_include)
if override_url == regular_url:
return self.write(str(False))
protocol = self.request.protocol + "://"
protocol_length = len(protocol)
check_regular = regular_url.find(protocol, 0, protocol_length)
check_override = override_url.find(protocol, 0, protocol_length)
if do_include:
result = (check_override == 0 and check_regular == -1)
else:
result = (check_override == -1 and check_regular == 0)
self.write(str(result))
return [('/static_url/(.*)', StaticUrlHandler),
('/abs_static_url/(.*)', AbsoluteStaticUrlHandler),
('/override_static_url/(.*)', OverrideStaticUrlHandler)]
def get_app_kwargs(self):
return dict(static_path=relpath('static'))
def test_static_files(self):
response = self.fetch('/robots.txt')
self.assertTrue(b"Disallow: /" in response.body)
response = self.fetch('/static/robots.txt')
self.assertTrue(b"Disallow: /" in response.body)
def test_static_url(self):
response = self.fetch("/static_url/robots.txt")
self.assertEqual(response.body,
b"/static/robots.txt?v=" + self.robots_txt_hash)
def test_absolute_static_url(self):
response = self.fetch("/abs_static_url/robots.txt")
self.assertEqual(response.body, (
utf8(self.get_url("/")) +
b"static/robots.txt?v=" +
self.robots_txt_hash
))
def test_relative_version_exclusion(self):
response = self.fetch("/static_url/robots.txt?include_version=0")
self.assertEqual(response.body, b"/static/robots.txt")
def test_absolute_version_exclusion(self):
response = self.fetch("/abs_static_url/robots.txt?include_version=0")
self.assertEqual(response.body,
utf8(self.get_url("/") + "static/robots.txt"))
def test_include_host_override(self):
self._trigger_include_host_check(False)
self._trigger_include_host_check(True)
def _trigger_include_host_check(self, include_host):
path = "/override_static_url/robots.txt?include_host=%s"
response = self.fetch(path % int(include_host))
self.assertEqual(response.body, utf8(str(True)))
def test_static_304_if_modified_since(self):
response1 = self.fetch("/static/robots.txt")
response2 = self.fetch("/static/robots.txt", headers={
'If-Modified-Since': response1.headers['Last-Modified']})
self.assertEqual(response2.code, 304)
self.assertTrue('Content-Length' not in response2.headers)
self.assertTrue('Last-Modified' not in response2.headers)
def test_static_304_if_none_match(self):
response1 = self.fetch("/static/robots.txt")
response2 = self.fetch("/static/robots.txt", headers={
'If-None-Match': response1.headers['Etag']})
self.assertEqual(response2.code, 304)
def test_static_if_modified_since_pre_epoch(self):
# On windows, the functions that work with time_t do not accept
# negative values, and at least one client (processing.js) seems
# to use if-modified-since 1/1/1960 as a cache-busting technique.
response = self.fetch("/static/robots.txt", headers={
'If-Modified-Since': 'Fri, 01 Jan 1960 00:00:00 GMT'})
self.assertEqual(response.code, 200)
def test_static_if_modified_since_time_zone(self):
# Instead of the value from Last-Modified, make requests with times
# chosen just before and after the known modification time
# of the file to ensure that the right time zone is being used
# when parsing If-Modified-Since.
stat = os.stat(relpath('static/robots.txt'))
response = self.fetch('/static/robots.txt', headers={
'If-Modified-Since': format_timestamp(stat.st_mtime - 1)})
self.assertEqual(response.code, 200)
response = self.fetch('/static/robots.txt', headers={
'If-Modified-Since': format_timestamp(stat.st_mtime + 1)})
self.assertEqual(response.code, 304)
def test_static_etag(self):
response = self.fetch('/static/robots.txt')
self.assertEqual(utf8(response.headers.get("Etag")),
b'"' + self.robots_txt_hash + b'"')
def test_static_with_range(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=0-9'})
self.assertEqual(response.code, 206)
self.assertEqual(response.body, b"User-agent")
self.assertEqual(utf8(response.headers.get("Etag")),
b'"' + self.robots_txt_hash + b'"')
self.assertEqual(response.headers.get("Content-Length"), "10")
self.assertEqual(response.headers.get("Content-Range"),
"bytes 0-9/26")
def test_static_with_range_full_file(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=0-'})
# Note: Chrome refuses to play audio if it gets an HTTP 206 in response
# to ``Range: bytes=0-`` :(
self.assertEqual(response.code, 200)
robots_file_path = os.path.join(self.static_dir, "robots.txt")
with open(robots_file_path) as f:
self.assertEqual(response.body, utf8(f.read()))
self.assertEqual(response.headers.get("Content-Length"), "26")
self.assertEqual(response.headers.get("Content-Range"), None)
def test_static_with_range_full_past_end(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=0-10000000'})
self.assertEqual(response.code, 200)
robots_file_path = os.path.join(self.static_dir, "robots.txt")
with open(robots_file_path) as f:
self.assertEqual(response.body, utf8(f.read()))
self.assertEqual(response.headers.get("Content-Length"), "26")
self.assertEqual(response.headers.get("Content-Range"), None)
def test_static_with_range_partial_past_end(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=1-10000000'})
self.assertEqual(response.code, 206)
robots_file_path = os.path.join(self.static_dir, "robots.txt")
with open(robots_file_path) as f:
self.assertEqual(response.body, utf8(f.read()[1:]))
self.assertEqual(response.headers.get("Content-Length"), "25")
self.assertEqual(response.headers.get("Content-Range"), "bytes 1-25/26")
def test_static_with_range_end_edge(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=22-'})
self.assertEqual(response.body, b": /\n")
self.assertEqual(response.headers.get("Content-Length"), "4")
self.assertEqual(response.headers.get("Content-Range"),
"bytes 22-25/26")
def test_static_with_range_neg_end(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=-4'})
self.assertEqual(response.body, b": /\n")
self.assertEqual(response.headers.get("Content-Length"), "4")
self.assertEqual(response.headers.get("Content-Range"),
"bytes 22-25/26")
def test_static_invalid_range(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'asdf'})
self.assertEqual(response.code, 200)
def test_static_unsatisfiable_range_zero_suffix(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=-0'})
self.assertEqual(response.headers.get("Content-Range"),
"bytes */26")
self.assertEqual(response.code, 416)
def test_static_unsatisfiable_range_invalid_start(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=26'})
self.assertEqual(response.code, 416)
self.assertEqual(response.headers.get("Content-Range"),
"bytes */26")
def test_static_head(self):
response = self.fetch('/static/robots.txt', method='HEAD')
self.assertEqual(response.code, 200)
# No body was returned, but we did get the right content length.
self.assertEqual(response.body, b'')
self.assertEqual(response.headers['Content-Length'], '26')
self.assertEqual(utf8(response.headers['Etag']),
b'"' + self.robots_txt_hash + b'"')
def test_static_head_range(self):
response = self.fetch('/static/robots.txt', method='HEAD',
headers={'Range': 'bytes=1-4'})
self.assertEqual(response.code, 206)
self.assertEqual(response.body, b'')
self.assertEqual(response.headers['Content-Length'], '4')
self.assertEqual(utf8(response.headers['Etag']),
b'"' + self.robots_txt_hash + b'"')
def test_static_range_if_none_match(self):
response = self.fetch('/static/robots.txt', headers={
'Range': 'bytes=1-4',
'If-None-Match': b'"' + self.robots_txt_hash + b'"'})
self.assertEqual(response.code, 304)
self.assertEqual(response.body, b'')
self.assertTrue('Content-Length' not in response.headers)
self.assertEqual(utf8(response.headers['Etag']),
b'"' + self.robots_txt_hash + b'"')
def test_static_404(self):
response = self.fetch('/static/blarg')
self.assertEqual(response.code, 404)
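# The robots_txt_hash constant above is what StaticFileHandler.get_version
# returns for this file in this tornado version -- presumably an MD5 hex
# digest of the file contents, along the lines of:
#   import hashlib
#   with open(relpath('static/robots.txt'), 'rb') as f:
#       version = hashlib.md5(f.read()).hexdigest()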
@wsgi_safe
class StaticDefaultFilenameTest(WebTestCase):
def get_app_kwargs(self):
return dict(static_path=relpath('static'),
static_handler_args=dict(default_filename='index.html'))
def get_handlers(self):
return []
def test_static_default_filename(self):
response = self.fetch('/static/dir/', follow_redirects=False)
self.assertEqual(response.code, 200)
self.assertEqual(b'this is the index\n', response.body)
def test_static_default_redirect(self):
response = self.fetch('/static/dir', follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertTrue(response.headers['Location'].endswith('/static/dir/'))
@wsgi_safe
class StaticFileWithPathTest(WebTestCase):
def get_app_kwargs(self):
return dict(static_path=relpath('static'),
static_handler_args=dict(default_filename='index.html'))
def get_handlers(self):
return [("/foo/(.*)", StaticFileHandler, {
"path": relpath("templates/"),
})]
def test_serve(self):
response = self.fetch("/foo/utf8.html")
self.assertEqual(response.body, b"H\xc3\xa9llo\n")
@wsgi_safe
class CustomStaticFileTest(WebTestCase):
def get_handlers(self):
class MyStaticFileHandler(StaticFileHandler):
@classmethod
def make_static_url(cls, settings, path):
version_hash = cls.get_version(settings, path)
extension_index = path.rindex('.')
before_version = path[:extension_index]
after_version = path[(extension_index + 1):]
return '/static/%s.%s.%s' % (before_version, version_hash,
after_version)
def parse_url_path(self, url_path):
extension_index = url_path.rindex('.')
version_index = url_path.rindex('.', 0, extension_index)
return '%s%s' % (url_path[:version_index],
url_path[extension_index:])
@classmethod
def get_absolute_path(cls, settings, path):
return 'CustomStaticFileTest:' + path
def validate_absolute_path(self, root, absolute_path):
return absolute_path
@classmethod
def get_content(cls, path, start=None, end=None):
assert start is None and end is None
if path == 'CustomStaticFileTest:foo.txt':
return b'bar'
raise Exception("unexpected path %r" % path)
def get_modified_time(self):
return None
@classmethod
def get_version(cls, settings, path):
return "42"
class StaticUrlHandler(RequestHandler):
def get(self, path):
self.write(self.static_url(path))
self.static_handler_class = MyStaticFileHandler
return [("/static_url/(.*)", StaticUrlHandler)]
def get_app_kwargs(self):
return dict(static_path="dummy",
static_handler_class=self.static_handler_class)
def test_serve(self):
response = self.fetch("/static/foo.42.txt")
self.assertEqual(response.body, b"bar")
def test_static_url(self):
with ExpectLog(gen_log, "Could not open static file", required=False):
response = self.fetch("/static_url/foo.txt")
self.assertEqual(response.body, b"/static/foo.42.txt")
@wsgi_safe
class HostMatchingTest(WebTestCase):
class Handler(RequestHandler):
def initialize(self, reply):
self.reply = reply
def get(self):
self.write(self.reply)
def get_handlers(self):
return [("/foo", HostMatchingTest.Handler, {"reply": "wildcard"})]
def test_host_matching(self):
self.app.add_handlers("www.example.com",
[("/foo", HostMatchingTest.Handler, {"reply": "[0]"})])
self.app.add_handlers(r"www\.example\.com",
[("/bar", HostMatchingTest.Handler, {"reply": "[1]"})])
self.app.add_handlers("www.example.com",
[("/baz", HostMatchingTest.Handler, {"reply": "[2]"})])
response = self.fetch("/foo")
self.assertEqual(response.body, b"wildcard")
response = self.fetch("/bar")
self.assertEqual(response.code, 404)
response = self.fetch("/baz")
self.assertEqual(response.code, 404)
response = self.fetch("/foo", headers={'Host': 'www.example.com'})
self.assertEqual(response.body, b"[0]")
response = self.fetch("/bar", headers={'Host': 'www.example.com'})
self.assertEqual(response.body, b"[1]")
response = self.fetch("/baz", headers={'Host': 'www.example.com'})
self.assertEqual(response.body, b"[2]")
@wsgi_safe
class NamedURLSpecGroupsTest(WebTestCase):
def get_handlers(self):
class EchoHandler(RequestHandler):
def get(self, path):
self.write(path)
return [("/str/(?P<path>.*)", EchoHandler),
(u("/unicode/(?P<path>.*)"), EchoHandler)]
def test_named_urlspec_groups(self):
response = self.fetch("/str/foo")
self.assertEqual(response.body, b"foo")
response = self.fetch("/unicode/bar")
self.assertEqual(response.body, b"bar")
@wsgi_safe
class ClearHeaderTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_header("h1", "foo")
self.set_header("h2", "bar")
self.clear_header("h1")
self.clear_header("nonexistent")
def test_clear_header(self):
response = self.fetch("/")
self.assertTrue("h1" not in response.headers)
self.assertEqual(response.headers["h2"], "bar")
@wsgi_safe
class Header304Test(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.set_header("Content-Language", "en_US")
self.write("hello")
def test_304_headers(self):
response1 = self.fetch('/')
self.assertEqual(response1.headers["Content-Length"], "5")
self.assertEqual(response1.headers["Content-Language"], "en_US")
response2 = self.fetch('/', headers={
'If-None-Match': response1.headers["Etag"]})
self.assertEqual(response2.code, 304)
self.assertTrue("Content-Length" not in response2.headers)
self.assertTrue("Content-Language" not in response2.headers)
# Not an entity header, but should not be added to 304s by chunking
self.assertTrue("Transfer-Encoding" not in response2.headers)
@wsgi_safe
class StatusReasonTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
reason = self.request.arguments.get('reason', [])
self.set_status(int(self.get_argument('code')),
reason=reason[0] if reason else None)
def get_http_client(self):
# simple_httpclient only: curl doesn't expose the reason string
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def test_status(self):
response = self.fetch("/?code=304")
self.assertEqual(response.code, 304)
self.assertEqual(response.reason, "Not Modified")
response = self.fetch("/?code=304&reason=Foo")
self.assertEqual(response.code, 304)
self.assertEqual(response.reason, "Foo")
response = self.fetch("/?code=682&reason=Bar")
self.assertEqual(response.code, 682)
self.assertEqual(response.reason, "Bar")
with ExpectLog(app_log, 'Uncaught exception'):
response = self.fetch("/?code=682")
self.assertEqual(response.code, 500)
@wsgi_safe
class DateHeaderTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.write("hello")
def test_date_header(self):
response = self.fetch('/')
header_date = datetime.datetime(
*email.utils.parsedate(response.headers['Date'])[:6])
self.assertTrue(header_date - datetime.datetime.utcnow() <
datetime.timedelta(seconds=2))
@wsgi_safe
class RaiseWithReasonTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
raise HTTPError(682, reason="Foo")
def get_http_client(self):
# simple_httpclient only: curl doesn't expose the reason string
return SimpleAsyncHTTPClient(io_loop=self.io_loop)
def test_raise_with_reason(self):
response = self.fetch("/")
self.assertEqual(response.code, 682)
self.assertEqual(response.reason, "Foo")
self.assertIn(b'682: Foo', response.body)
def test_httperror_str(self):
self.assertEqual(str(HTTPError(682, reason="Foo")), "HTTP 682: Foo")
@wsgi_safe
class ErrorHandlerXSRFTest(WebTestCase):
def get_handlers(self):
# note that if the handlers list is empty we get the default_host
# redirect fallback instead of a 404, so test with both an
# explicitly defined error handler and an implicit 404.
return [('/error', ErrorHandler, dict(status_code=417))]
def get_app_kwargs(self):
return dict(xsrf_cookies=True)
def test_error_xsrf(self):
response = self.fetch('/error', method='POST', body='')
self.assertEqual(response.code, 417)
def test_404_xsrf(self):
response = self.fetch('/404', method='POST', body='')
self.assertEqual(response.code, 404)
class GzipTestCase(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
if self.get_argument('vary', None):
self.set_header('Vary', self.get_argument('vary'))
self.write('hello world')
def get_app_kwargs(self):
return dict(gzip=True)
def test_gzip(self):
response = self.fetch('/')
self.assertEqual(response.headers['Content-Encoding'], 'gzip')
self.assertEqual(response.headers['Vary'], 'Accept-Encoding')
def test_gzip_not_requested(self):
response = self.fetch('/', use_gzip=False)
self.assertNotIn('Content-Encoding', response.headers)
self.assertEqual(response.headers['Vary'], 'Accept-Encoding')
def test_vary_already_present(self):
response = self.fetch('/?vary=Accept-Language')
self.assertEqual(response.headers['Vary'],
'Accept-Language, Accept-Encoding')
@wsgi_safe
class PathArgsInPrepareTest(WebTestCase):
class Handler(RequestHandler):
def prepare(self):
self.write(dict(args=self.path_args, kwargs=self.path_kwargs))
def get(self, path):
assert path == 'foo'
self.finish()
def get_handlers(self):
return [('/pos/(.*)', self.Handler),
('/kw/(?P<path>.*)', self.Handler)]
def test_pos(self):
response = self.fetch('/pos/foo')
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {'args': ['foo'], 'kwargs': {}})
def test_kw(self):
response = self.fetch('/kw/foo')
response.rethrow()
data = json_decode(response.body)
self.assertEqual(data, {'args': [], 'kwargs': {'path': 'foo'}})
@wsgi_safe
class ClearAllCookiesTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
self.clear_all_cookies()
self.write('ok')
def test_clear_all_cookies(self):
response = self.fetch('/', headers={'Cookie': 'foo=bar; baz=xyzzy'})
set_cookies = sorted(response.headers.get_list('Set-Cookie'))
self.assertTrue(set_cookies[0].startswith('baz=;'))
self.assertTrue(set_cookies[1].startswith('foo=;'))
class PermissionError(Exception):
pass
@wsgi_safe
class ExceptionHandlerTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
exc = self.get_argument('exc')
if exc == 'http':
raise HTTPError(410, "no longer here")
elif exc == 'zero':
1 / 0
elif exc == 'permission':
raise PermissionError('not allowed')
def write_error(self, status_code, **kwargs):
if 'exc_info' in kwargs:
typ, value, tb = kwargs['exc_info']
if isinstance(value, PermissionError):
self.set_status(403)
self.write('PermissionError')
return
RequestHandler.write_error(self, status_code, **kwargs)
def log_exception(self, typ, value, tb):
if isinstance(value, PermissionError):
app_log.warning('custom logging for PermissionError: %s',
value.args[0])
else:
RequestHandler.log_exception(self, typ, value, tb)
def test_http_error(self):
# HTTPErrors are logged as warnings with no stack trace.
# TODO: extend ExpectLog to test this more precisely
with ExpectLog(gen_log, '.*no longer here'):
response = self.fetch('/?exc=http')
self.assertEqual(response.code, 410)
def test_unknown_error(self):
# Unknown errors are logged as errors with a stack trace.
with ExpectLog(app_log, 'Uncaught exception'):
response = self.fetch('/?exc=zero')
self.assertEqual(response.code, 500)
def test_known_error(self):
# log_exception can override logging behavior, and write_error
# can override the response.
with ExpectLog(app_log,
'custom logging for PermissionError: not allowed'):
response = self.fetch('/?exc=permission')
self.assertEqual(response.code, 403)
@wsgi_safe
class UIMethodUIModuleTest(SimpleHandlerTestCase):
"""Test that UI methods and modules are created correctly and
associated with the handler.
"""
class Handler(RequestHandler):
def get(self):
self.render('foo.html')
def value(self):
return self.get_argument("value")
def get_app_kwargs(self):
def my_ui_method(handler, x):
return "In my_ui_method(%s) with handler value %s." % (
x, handler.value())
class MyModule(UIModule):
def render(self, x):
return "In MyModule(%s) with handler value %s." % (
x, self.handler.value())
loader = DictLoader({
'foo.html': '{{ my_ui_method(42) }} {% module MyModule(123) %}',
})
return dict(template_loader=loader,
ui_methods={'my_ui_method': my_ui_method},
ui_modules={'MyModule': MyModule})
def tearDown(self):
super(UIMethodUIModuleTest, self).tearDown()
# TODO: fix template loader caching so this isn't necessary.
RequestHandler._template_loaders.clear()
def test_ui_method(self):
response = self.fetch('/?value=asdf')
self.assertEqual(response.body,
b'In my_ui_method(42) with handler value asdf. '
b'In MyModule(123) with handler value asdf.')
@wsgi_safe
class GetArgumentErrorTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
try:
self.get_argument('foo')
self.write({})
except MissingArgumentError as e:
self.write({'arg_name': e.arg_name,
'log_message': e.log_message})
def test_catch_error(self):
response = self.fetch('/')
self.assertEqual(json_decode(response.body),
{'arg_name': 'foo',
'log_message': 'Missing argument foo'})
class MultipleExceptionTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
exc_count = 0
@asynchronous
def get(self):
from tornado.ioloop import IOLoop
IOLoop.current().add_callback(lambda: 1 / 0)
IOLoop.current().add_callback(lambda: 1 / 0)
def log_exception(self, typ, value, tb):
MultipleExceptionTest.Handler.exc_count += 1
def test_multi_exception(self):
# This test verifies that multiple exceptions raised into the same
# ExceptionStackContext do not generate extraneous log entries
# due to "Cannot send error response after headers written".
# log_exception is called, but it does not proceed to send_error.
response = self.fetch('/')
self.assertEqual(response.code, 500)
response = self.fetch('/')
self.assertEqual(response.code, 500)
# Each of our two requests generated two exceptions, we should have
# seen at least three of them by now (the fourth may still be
# in the queue).
self.assertGreater(MultipleExceptionTest.Handler.exc_count, 2)
@wsgi_safe
class SetCurrentUserTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def prepare(self):
self.current_user = 'Ben'
def get(self):
self.write('Hello %s' % self.current_user)
def test_set_current_user(self):
# Ensure that current_user can be assigned to normally for apps
# that want to forgo the lazy get_current_user property
response = self.fetch('/')
self.assertEqual(response.body, b'Hello Ben')
@wsgi_safe
class GetCurrentUserTest(WebTestCase):
def get_app_kwargs(self):
class WithoutUserModule(UIModule):
def render(self):
return ''
class WithUserModule(UIModule):
def render(self):
return str(self.current_user)
loader = DictLoader({
'without_user.html': '',
'with_user.html': '{{ current_user }}',
'without_user_module.html': '{% module WithoutUserModule() %}',
'with_user_module.html': '{% module WithUserModule() %}',
})
return dict(template_loader=loader,
ui_modules={'WithUserModule': WithUserModule,
'WithoutUserModule': WithoutUserModule})
def tearDown(self):
super(GetCurrentUserTest, self).tearDown()
RequestHandler._template_loaders.clear()
def get_handlers(self):
class CurrentUserHandler(RequestHandler):
def prepare(self):
self.has_loaded_current_user = False
def get_current_user(self):
self.has_loaded_current_user = True
return ''
class WithoutUserHandler(CurrentUserHandler):
def get(self):
self.render_string('without_user.html')
self.finish(str(self.has_loaded_current_user))
class WithUserHandler(CurrentUserHandler):
def get(self):
self.render_string('with_user.html')
self.finish(str(self.has_loaded_current_user))
class CurrentUserModuleHandler(CurrentUserHandler):
def get_template_namespace(self):
# If RequestHandler.get_template_namespace is called, then
# get_current_user is evaluated. Until #820 is fixed, this
# is a small hack to circumvent the issue.
return self.ui
class WithoutUserModuleHandler(CurrentUserModuleHandler):
def get(self):
self.render_string('without_user_module.html')
self.finish(str(self.has_loaded_current_user))
class WithUserModuleHandler(CurrentUserModuleHandler):
def get(self):
self.render_string('with_user_module.html')
self.finish(str(self.has_loaded_current_user))
return [('/without_user', WithoutUserHandler),
('/with_user', WithUserHandler),
('/without_user_module', WithoutUserModuleHandler),
('/with_user_module', WithUserModuleHandler)]
@unittest.skip('needs fix')
def test_get_current_user_is_lazy(self):
# TODO: Make this test pass. See #820.
response = self.fetch('/without_user')
self.assertEqual(response.body, b'False')
def test_get_current_user_works(self):
response = self.fetch('/with_user')
self.assertEqual(response.body, b'True')
def test_get_current_user_from_ui_module_is_lazy(self):
response = self.fetch('/without_user_module')
self.assertEqual(response.body, b'False')
def test_get_current_user_from_ui_module_works(self):
response = self.fetch('/with_user_module')
self.assertEqual(response.body, b'True')
@wsgi_safe
class UnimplementedHTTPMethodsTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
pass
def test_unimplemented_standard_methods(self):
for method in ['HEAD', 'GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/', method=method)
self.assertEqual(response.code, 405)
for method in ['POST', 'PUT']:
response = self.fetch('/', method=method, body=b'')
self.assertEqual(response.code, 405)
class UnimplementedNonStandardMethodsTest(SimpleHandlerTestCase):
# wsgiref.validate complains about unknown methods in a way that makes
# this test not wsgi_safe.
class Handler(RequestHandler):
def other(self):
# Even though this method exists, it won't get called automatically
# because it is not in SUPPORTED_METHODS.
self.write('other')
def test_unimplemented_patch(self):
# PATCH is recently standardized; Tornado supports it by default
# but wsgiref.validate doesn't like it.
response = self.fetch('/', method='PATCH', body=b'')
self.assertEqual(response.code, 405)
def test_unimplemented_other(self):
response = self.fetch('/', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.code, 405)
@wsgi_safe
class AllHTTPMethodsTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def method(self):
self.write(self.request.method)
get = delete = options = post = put = method
def test_standard_methods(self):
response = self.fetch('/', method='HEAD')
self.assertEqual(response.body, b'')
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT']:
response = self.fetch('/', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
class PatchMethodTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def patch(self):
self.write('patch')
def other(self):
self.write('other')
def test_patch(self):
response = self.fetch('/', method='PATCH', body=b'')
self.assertEqual(response.body, b'patch')
def test_other(self):
response = self.fetch('/', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'other')
@wsgi_safe
class FinishInPrepareTest(SimpleHandlerTestCase):
class Handler(RequestHandler):
def prepare(self):
self.finish('done')
def get(self):
# It's difficult to assert for certain that a method did not
# or will not be called in an asynchronous context, but this
# will be logged noisily if it is reached.
raise Exception('should not reach this method')
def test_finish_in_prepare(self):
response = self.fetch('/')
self.assertEqual(response.body, b'done')
@wsgi_safe
class Default404Test(WebTestCase):
def get_handlers(self):
# If there are no handlers at all a default redirect handler gets added.
return [('/foo', RequestHandler)]
def test_404(self):
response = self.fetch('/')
self.assertEqual(response.code, 404)
self.assertEqual(response.body,
b'<html><title>404: Not Found</title>'
b'<body>404: Not Found</body></html>')
@wsgi_safe
class Custom404Test(WebTestCase):
def get_handlers(self):
return [('/foo', RequestHandler)]
def get_app_kwargs(self):
class Custom404Handler(RequestHandler):
def get(self):
self.set_status(404)
self.write('custom 404 response')
return dict(default_handler_class=Custom404Handler)
def test_404(self):
response = self.fetch('/')
self.assertEqual(response.code, 404)
self.assertEqual(response.body, b'custom 404 response')
@wsgi_safe
class DefaultHandlerArgumentsTest(WebTestCase):
def get_handlers(self):
return [('/foo', RequestHandler)]
def get_app_kwargs(self):
return dict(default_handler_class=ErrorHandler,
default_handler_args=dict(status_code=403))
def test_403(self):
response = self.fetch('/')
self.assertEqual(response.code, 403)
@wsgi_safe
class HandlerByNameTest(WebTestCase):
def get_handlers(self):
# All three are equivalent.
return [('/hello1', HelloHandler),
('/hello2', 'tornado.test.web_test.HelloHandler'),
url('/hello3', 'tornado.test.web_test.HelloHandler'),
]
def test_handler_by_name(self):
resp = self.fetch('/hello1')
self.assertEqual(resp.body, b'hello')
resp = self.fetch('/hello2')
self.assertEqual(resp.body, b'hello')
resp = self.fetch('/hello3')
self.assertEqual(resp.body, b'hello')
jgraham/servo | tests/wpt/harness/wptrunner/update/base.py | 196 | 2148 | mpl-2.0

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

exit_unclean = object()
exit_clean = object()


class Step(object):
    provides = []

    def __init__(self, logger):
        self.logger = logger

    def run(self, step_index, state):
        """Base class for state-creating steps.
        When a Step is run() the current state is checked to see
        if the state from this step has already been created. If it
        has the restore() method is invoked. Otherwise the create()
        method is invoked with the state object. This is expected to
        add items with all the keys in __class__.provides to the state
        object.
        """
        name = self.__class__.__name__
        try:
            stored_step = state.steps[step_index]
        except IndexError:
            stored_step = None

        if stored_step == name:
            self.restore(state)
        elif stored_step is None:
            self.create(state)
            assert set(self.provides).issubset(set(state.keys()))
            state.steps = state.steps + [name]
        else:
            raise ValueError("Expected a %s step, got a %s step" % (name, stored_step))

    def create(self, data):
        raise NotImplementedError

    def restore(self, state):
        self.logger.debug("Step %s using stored state" % (self.__class__.__name__,))
        for key in self.provides:
            assert key in state


class StepRunner(object):
    steps = []

    def __init__(self, logger, state):
        """Class that runs a specified series of Steps with a common State"""
        self.state = state
        self.logger = logger
        if "steps" not in state:
            state.steps = []

    def run(self):
        rv = None
        for step_index, step in enumerate(self.steps):
            self.logger.debug("Starting step %s" % step.__name__)
            rv = step(self.logger).run(step_index, self.state)
            if rv in (exit_clean, exit_unclean):
                break
        return rv
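# A hedged sketch of how the Step/StepRunner contract above is meant to be
# used. The step, the state key, and the URL are invented for illustration;
# it assumes a State object that supports attribute assignment and keys(),
# which is what the base classes above expect.
class ExampleFetchStep(Step):
    provides = ["source_url"]

    def create(self, state):
        # First run: populate every key listed in `provides`; run() then
        # records this step's name in state.steps so reruns call restore().
        state.source_url = "https://example.com/tests.tar.gz"


class ExampleRunner(StepRunner):
    # Steps execute in order; returning exit_clean/exit_unclean stops the run.
    steps = [ExampleFetchStep]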
| mpl-2.0 |
Pal3love/otRebuilder | Package/otRebuilder/Dep/fontTools/misc/xmlReader.py | 1 | 4302 | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import sys
import os
import logging
log = logging.getLogger(__name__)
class TTXParseError(Exception): pass
BUFSIZE = 0x4000
class XMLReader(object):
def __init__(self, fileOrPath, ttFont, progress=None, quiet=None):
if fileOrPath == '-':
fileOrPath = sys.stdin
if not hasattr(fileOrPath, "read"):
self.file = open(fileOrPath, "rb")
self._closeStream = True
else:
# assume readable file object
self.file = fileOrPath
self._closeStream = False
self.ttFont = ttFont
self.progress = progress
if quiet is not None:
from fontTools.misc.loggingTools import deprecateArgument
deprecateArgument("quiet", "configure logging instead")
self.quiet = quiet
self.root = None
self.contentStack = []
self.stackSize = 0
def read(self, rootless=False):
if rootless:
self.stackSize += 1
if self.progress:
self.file.seek(0, 2)
fileSize = self.file.tell()
self.progress.set(0, fileSize // 100 or 1)
self.file.seek(0)
self._parseFile(self.file)
if self._closeStream:
self.close()
if rootless:
self.stackSize -= 1
def close(self):
self.file.close()
def _parseFile(self, file):
from xml.parsers.expat import ParserCreate
parser = ParserCreate()
parser.StartElementHandler = self._startElementHandler
parser.EndElementHandler = self._endElementHandler
parser.CharacterDataHandler = self._characterDataHandler
pos = 0
while True:
chunk = file.read(BUFSIZE)
if not chunk:
parser.Parse(chunk, 1)
break
pos = pos + len(chunk)
if self.progress:
self.progress.set(pos // 100)
parser.Parse(chunk, 0)
def _startElementHandler(self, name, attrs):
stackSize = self.stackSize
self.stackSize = stackSize + 1
if not stackSize:
if name != "ttFont":
raise TTXParseError("illegal root tag: %s" % name)
sfntVersion = attrs.get("sfntVersion")
if sfntVersion is not None:
if len(sfntVersion) != 4:
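					# not a plain 4-character tag; assume an escaped string
					# such as "\x00\x01\x00\x00" and evaluate it back to the
					# raw value before storing it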
sfntVersion = safeEval('"' + sfntVersion + '"')
self.ttFont.sfntVersion = sfntVersion
self.contentStack.append([])
elif stackSize == 1:
subFile = attrs.get("src")
if subFile is not None:
if hasattr(self.file, 'name'):
# if file has a name, get its parent directory
dirname = os.path.dirname(self.file.name)
else:
# else fall back to using the current working directory
dirname = os.getcwd()
subFile = os.path.join(dirname, subFile)
subReader = XMLReader(subFile, self.ttFont, self.progress)
subReader.read()
self.contentStack.append([])
return
tag = ttLib.xmlToTag(name)
msg = "Parsing '%s' table..." % tag
if self.progress:
self.progress.setLabel(msg)
log.info(msg)
if tag == "GlyphOrder":
tableClass = ttLib.GlyphOrder
elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
tableClass = DefaultTable
else:
tableClass = ttLib.getTableClass(tag)
if tableClass is None:
tableClass = DefaultTable
if tag == 'loca' and tag in self.ttFont:
# Special-case the 'loca' table as we need the
# original if the 'glyf' table isn't recompiled.
self.currentTable = self.ttFont[tag]
else:
self.currentTable = tableClass(tag)
self.ttFont[tag] = self.currentTable
self.contentStack.append([])
elif stackSize == 2:
self.contentStack.append([])
self.root = (name, attrs, self.contentStack[-1])
else:
l = []
self.contentStack[-1].append((name, attrs, l))
self.contentStack.append(l)
def _characterDataHandler(self, data):
if self.stackSize > 1:
self.contentStack[-1].append(data)
def _endElementHandler(self, name):
self.stackSize = self.stackSize - 1
del self.contentStack[-1]
if self.stackSize == 1:
self.root = None
elif self.stackSize == 2:
name, attrs, content = self.root
self.currentTable.fromXML(name, attrs, content, self.ttFont)
self.root = None
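def _example_load_ttx(path):
	# Illustrative sketch only, not part of the original module: stream a
	# TTX file at `path` (a hypothetical filename) into a fresh TTFont.
	# XMLReader feeds expat in BUFSIZE chunks, so large files parse without
	# loading the whole document into memory.
	font = ttLib.TTFont()
	XMLReader(path, font).read()
	return font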
class ProgressPrinter(object):
def __init__(self, title, maxval=100):
print(title)
def set(self, val, maxval=None):
pass
def increment(self, val=1):
pass
def setLabel(self, text):
print(text)
| mit |
ganeshrn/ansible | test/units/galaxy/test_collection_install.py | 15 | 43234 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import pytest
import re
import shutil
import stat
import tarfile
import yaml
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.galaxy import collection, api, dependency_resolution
from ansible.galaxy.dependency_resolution.dataclasses import Candidate, Requirement
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
class RequirementCandidates():
def __init__(self):
self.candidates = []
def func_wrapper(self, func):
def run(*args, **kwargs):
self.candidates = func(*args, **kwargs)
return self.candidates
return run
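# Used in the tests below: func_wrapper() wraps a provider method such as
# CollectionDependencyProvider.find_matches so that each call's return value
# is captured in self.candidates for later assertions.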
def call_galaxy_cli(args):
orig = co.GlobalCLIArgs._Singleton__instance
co.GlobalCLIArgs._Singleton__instance = None
try:
GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
finally:
co.GlobalCLIArgs._Singleton__instance = orig
def artifact_json(namespace, name, version, dependencies, server):
json_str = json.dumps({
'artifact': {
'filename': '%s-%s-%s.tar.gz' % (namespace, name, version),
'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
'size': 1234,
},
'download_url': '%s/download/%s-%s-%s.tar.gz' % (server, namespace, name, version),
'metadata': {
'namespace': namespace,
'name': name,
'dependencies': dependencies,
},
'version': version
})
return to_text(json_str)
def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
results = []
available_api_versions = available_api_versions or {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
for version in versions:
results.append({
'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
'version': version,
})
if api_version == 'v2':
json_str = json.dumps({
'count': len(versions),
'next': None,
'previous': None,
'results': results
})
if api_version == 'v3':
response = {'meta': {'count': len(versions)},
'data': results,
'links': {'first': None,
'last': None,
'next': None,
'previous': None},
}
json_str = json.dumps(response)
return to_text(json_str)
def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
errors_to_return = errors_to_return or []
available_api_versions = available_api_versions or {}
response = {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
if api_version == 'v2':
assert len(errors_to_return) <= 1
if errors_to_return:
response = errors_to_return[0]
if api_version == 'v3':
response['errors'] = errors_to_return
json_str = json.dumps(response)
return to_text(json_str)
@pytest.fixture(autouse=True)
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
namespace = 'ansible_namespace'
collection = 'collection'
skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
collection_path = os.path.join(test_dir, namespace, collection)
call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
'--collection-skeleton', skeleton_path])
dependencies = getattr(request, 'param', None)
if dependencies:
galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
with open(galaxy_yml, 'rb+') as galaxy_obj:
existing_yaml = yaml.safe_load(galaxy_obj)
existing_yaml['dependencies'] = dependencies
galaxy_obj.seek(0)
galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
galaxy_obj.truncate()
# Create a file with +x in the collection so we can test the permissions
execute_path = os.path.join(collection_path, 'runme.sh')
with open(execute_path, mode='wb') as fd:
fd.write(b"echo hi")
os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)
call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
return to_bytes(collection_path), to_bytes(collection_tar)
@pytest.fixture()
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
return galaxy_api
def test_build_requirement_from_path(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == collection_artifact[0]
assert actual.ver == u'0.1.0'
@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': version,
'dependencies': {
'ansible_namespace.collection': '*'
}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
# While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == to_text(version)
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(b"not json")
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_artifact_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# a collection artifact should always contain a valid version
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = (
'^Collection metadata file `.*` at `.*` is expected to have a valid SemVer '
'version value but got {empty_unicode_string!r}$'.
format(empty_unicode_string=u'')
)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# version may be falsey/arbitrary strings for collections in development
manifest_path = os.path.join(collection_artifact[0], b'galaxy.yml')
metadata = {
'authors': ['Ansible'],
'readme': 'README.md',
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {},
}
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(yaml.safe_dump(metadata)))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
    # While the folder name suggests a different collection, we treat galaxy.yml as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == u'*'
def test_build_requirement_from_tar(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_requirement_dict({'name': to_text(collection_artifact[1])}, concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == to_text(collection_artifact[1])
assert actual.ver == u'0.1.0'
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
test_file = os.path.join(test_dir, b'fake.tar.gz')
with open(test_file, 'wb') as test_obj:
test_obj.write(b"\x00\x01\x02\x03")
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(test_file)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'files': [],
'format': 1,
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('FILES.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'collection_info': {},
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
with pytest.raises(KeyError, match='namespace'):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = b"not a json"
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_name(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.1.9', '2.1.10']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_version_metadata = MagicMock(
namespace='namespace', name='collection',
version='2.1.10', artifact_sha256='', dependencies={}
)
monkeypatch.setattr(api.GalaxyAPI, 'get_collection_version_metadata', mock_version_metadata)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
collections = ['namespace.collection']
requirements_file = None
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', collections[0]])
requirements = cli._require_one_of_collections_requirements(
collections, requirements_file, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.ver == u'2.1.10'
assert actual.src == galaxy_server
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease_explicit(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:2.0.1-beta.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:2.0.1-beta.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1-beta.1'
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.0.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
broken_server = copy.copy(galaxy_server)
broken_server.api_server = 'https://broken.com/'
mock_version_list = MagicMock()
mock_version_list.return_value = []
monkeypatch.setattr(broken_server, 'get_collection_versions', mock_version_list)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>1.0.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(
requirements, [broken_server, galaxy_server], concrete_artifact_cm, None, True, False, False
)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'1.0.3'
assert mock_version_list.call_count == 1
assert mock_version_list.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.return_value = []
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n* namespace.collection:* (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
StringIO()), "error")
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "error (HTTP Code: 401, Message: msg)"
with pytest.raises(api.GalaxyError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, False, False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:==2.0.0'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:==2.0.0'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.0'
assert [c.ver for c in matches.candidates] == [u'2.0.0']
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>=2.0.1,<2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>=2.0.1,<2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert [c.ver for c in matches.candidates] == [u'2.0.1']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
    mock_get_versions = MagicMock()
    mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
    monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.5'
# should be ordered latest to earliest
assert [c.ver for c in matches.candidates] == [u'2.0.5', u'2.0.4', u'2.0.3', u'2.0.1', u'2.0.0']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=2.0.5 (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_dep_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_get_info_return = [
api.CollectionVersionMetadata('parent', 'collection', '2.0.5', None, None, {'namespace.collection': '!=1.0.0'}),
api.CollectionVersionMetadata('namespace', 'collection', '1.0.0', None, None, {}),
]
mock_get_info = MagicMock(side_effect=mock_get_info_return)
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(side_effect=[['2.0.5'], ['1.0.0']])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'parent.collection:2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['parent.collection:2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=1.0.0 (dependency of parent.collection:2.0.5)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_install_installed_collection(monkeypatch, tmp_path_factory, galaxy_server):
mock_installed_collections = MagicMock(return_value=[Candidate('namespace.collection', '1.2.3', None, 'dir')])
monkeypatch.setattr(collection, 'find_existing_collections', mock_installed_collections)
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.2.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(return_value=['1.2.3', '1.3.0'])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
cli.run()
expected = "Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`."
assert mock_display.mock_calls[1][1][0] == expected
def test_install_collection(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection_tar = collection_artifact[1]
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
output_path = os.path.join(os.path.split(collection_tar)[0])
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir
candidate = Candidate('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')
collection.install(candidate, to_text(output_path), concrete_artifact_cm)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
shutil.rmtree(collection_path)
    collections_dir = os.path.sep.join(to_text(collection_path).split(os.path.sep)[:-2])
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
mock_download = MagicMock()
mock_download.return_value = collection_tar
monkeypatch.setattr(concrete_artifact_cm, 'get_galaxy_artifact_path', mock_download)
req = Requirement('ansible_namespace.collection', '0.1.0', 'https://downloadme.com', 'galaxy')
collection.install(req, to_text(collections_dir), concrete_artifact_cm)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
assert mock_download.call_count == 1
assert mock_download.mock_calls[0][1][0].src == 'https://downloadme.com'
assert mock_download.mock_calls[0][1][0].type == 'galaxy'
def test_install_collections_from_tar(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
assert os.path.isdir(collection_path)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh']
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 1
assert display_msgs[0] == 'Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`.'
for msg in display_msgs:
assert 'WARNING' not in msg
def test_install_missing_metadata_warning(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
for file in [b'MANIFEST.json', b'galaxy.yml']:
b_path = os.path.join(collection_path, file)
if os.path.isfile(b_path):
os.unlink(b_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert 'WARNING' in display_msgs[0]
# Makes sure we don't get stuck in some recursive loop
@pytest.mark.parametrize('collection_artifact', [
{'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
assert display_msgs[3] == "ansible_namespace.collection:0.1.0 was installed successfully"
| gpl-3.0 |
hnakamur/django | django/contrib/gis/gdal/srs.py | 366 | 12043 | """
The Spatial Reference class represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
def __init__(self, srs_input='', srs_type='user'):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be a string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
if srs_type == 'wkt':
self.ptr = capi.new_srs(c_char_p(b''))
self.import_wkt(srs_input)
return
elif isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr and capi:
capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984", ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
# #### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
# #### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
# #### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
        and will automatically determine whether to return the linear
or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = force_text(name)
return (units, name)
# #### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
# #### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
# #### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
# #### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
    @property
    def pretty_wkt(self):
        "Returns the 'pretty' representation of the WKT."
        return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), 0)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
    @property
    def xml(self):
        "Returns the XML representation of this Spatial Reference."
        return capi.to_xml(self.ptr, byref(c_char_p()), '')
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr and capi:
capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
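def _example_transform():
    # Illustrative sketch only, not part of the original module: build a
    # transform from WGS 84 (EPSG:4326) to NAD83 / Texas South Central
    # (EPSG:32140), the two systems used in the module docstring above.
    # An OGRGeometry.transform() call would accept the returned object.
    return CoordTransform(SpatialReference(4326), SpatialReference(32140))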
| bsd-3-clause |
pmghalvorsen/gramps_branch | gramps/plugins/gramplet/topsurnamesgramplet.py | 2 | 3999 | # Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2009 Douglas S. Blank <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from collections import defaultdict
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.plug import Gramplet
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.config import config
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
_YIELD_INTERVAL = 350
#------------------------------------------------------------------------
#
# TopSurnamesGramplet class
#
#------------------------------------------------------------------------
class TopSurnamesGramplet(Gramplet):
def init(self):
self.set_tooltip(_("Double-click surname for details"))
self.top_size = 10 # will be overwritten in load
self.set_text(_("No Family Tree loaded."))
def db_changed(self):
self.dbstate.db.connect('person-add', self.update)
self.dbstate.db.connect('person-delete', self.update)
self.dbstate.db.connect('person-update', self.update)
self.dbstate.db.connect('person-rebuild', self.update)
self.dbstate.db.connect('family-rebuild', self.update)
def on_load(self):
if len(self.gui.data) > 0:
self.top_size = int(self.gui.data[0])
def on_save(self):
self.gui.data = [self.top_size]
def main(self):
self.set_text(_("Processing...") + "\n")
surnames = defaultdict(int)
representative_handle = {}
cnt = 0
for person in self.dbstate.db.iter_people():
allnames = [person.get_primary_name()] + person.get_alternate_names()
allnames = set([name.get_group_name().strip() for name in allnames])
for surname in allnames:
surnames[surname] += 1
representative_handle[surname] = person.handle
cnt += 1
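            # yield periodically so the Gramplet's generator-driven update
            # loop can keep the UI responsive while scanning large trees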
if not cnt % _YIELD_INTERVAL:
yield True
total_people = cnt
surname_sort = []
total = 0
cnt = 0
for surname in surnames:
surname_sort.append( (surnames[surname], surname) )
total += surnames[surname]
cnt += 1
if not cnt % _YIELD_INTERVAL:
yield True
total_surnames = cnt
surname_sort.sort(reverse=True)
line = 0
### All done!
self.set_text("")
nosurname = config.get('preferences.no-surname-text')
for (count, surname) in surname_sort:
text = "%s, " % (surname if surname else nosurname)
text += "%d%% (%d)\n" % (int((float(count)/total) * 100), count)
self.append_text(" %d. " % (line + 1))
self.link(text, 'Surname', representative_handle[surname])
line += 1
if line >= self.top_size:
break
self.append_text(("\n" + _("Total unique surnames") + ": %d\n") %
total_surnames)
self.append_text((_("Total people") + ": %d") % total_people, "begin")
| gpl-2.0 |
daoluan/decode-Django | Django-1.5.1/tests/regressiontests/fixtures_regress/models.py | 60 | 5752 | from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import User
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Animal(models.Model):
name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
count = models.IntegerField()
weight = models.FloatField()
# use a non-default name for the default manager
specimens = models.Manager()
def __str__(self):
return self.name
class Plant(models.Model):
name = models.CharField(max_length=150)
class Meta:
# For testing when upper case letter in app name; regression for #4057
db_table = "Fixtures_regress_plant"
@python_2_unicode_compatible
class Stuff(models.Model):
name = models.CharField(max_length=20, null=True)
owner = models.ForeignKey(User, null=True)
def __str__(self):
return six.text_type(self.name) + ' is owned by ' + six.text_type(self.owner)
class Absolute(models.Model):
name = models.CharField(max_length=40)
load_count = 0
def __init__(self, *args, **kwargs):
super(Absolute, self).__init__(*args, **kwargs)
Absolute.load_count += 1
class Parent(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ('id',)
class Child(Parent):
data = models.CharField(max_length=10)
# Models to regression test #7572
class Channel(models.Model):
name = models.CharField(max_length=255)
class Article(models.Model):
title = models.CharField(max_length=255)
channels = models.ManyToManyField(Channel)
class Meta:
ordering = ('id',)
# Models to regression test #11428
@python_2_unicode_compatible
class Widget(models.Model):
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class WidgetProxy(Widget):
class Meta:
proxy = True
# Check for forward references in FKs and M2Ms with natural keys
class TestManager(models.Manager):
def get_by_natural_key(self, key):
return self.get(name=key)
@python_2_unicode_compatible
class Store(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
main = models.ForeignKey('self', null=True)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
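# With get_by_natural_key()/natural_key() defined above, fixtures can refer to
# a Store by name instead of by primary key. An illustrative (hypothetical)
# serialized entry:
#   {"model": "fixtures_regress.store", "fields": {"name": "Main store", "main": null}}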
@python_2_unicode_compatible
class Person(models.Model):
objects = TestManager()
name = models.CharField(max_length=255)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
# Person doesn't actually have a dependency on store, but we need to define
# one to test the behavior of the dependency resolution algorithm.
def natural_key(self):
return (self.name,)
natural_key.dependencies = ['fixtures_regress.store']
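# The dependency declaration above is consulted by the serializer's dependency
# sorting, so Store objects are dumped before the Person objects that
# (notionally) depend on them.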
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField(max_length=255)
author = models.ForeignKey(Person)
stores = models.ManyToManyField(Store)
class Meta:
ordering = ('name',)
def __str__(self):
return '%s by %s (available at %s)' % (
self.name,
self.author.name,
', '.join(s.name for s in self.stores.all())
)
class NKManager(models.Manager):
def get_by_natural_key(self, data):
return self.get(data=data)
@python_2_unicode_compatible
class NKChild(Parent):
data = models.CharField(max_length=10, unique=True)
objects = NKManager()
def natural_key(self):
return self.data
def __str__(self):
return 'NKChild %s:%s' % (self.name, self.data)
@python_2_unicode_compatible
class RefToNKChild(models.Model):
text = models.CharField(max_length=10)
nk_fk = models.ForeignKey(NKChild, related_name='ref_fks')
nk_m2m = models.ManyToManyField(NKChild, related_name='ref_m2ms')
def __str__(self):
return '%s: Reference to %s [%s]' % (
self.text,
self.nk_fk,
', '.join(str(o) for o in self.nk_m2m.all())
)
# Some models with pathological circular dependencies
class Circle1(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle2']
class Circle2(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle1']
class Circle3(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle3']
class Circle4(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle5']
class Circle5(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle6']
class Circle6(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.circle4']
class ExternalDependency(models.Model):
name = models.CharField(max_length=255)
def natural_key(self):
return self.name
natural_key.dependencies = ['fixtures_regress.book']
# Model for regression test of #11101
class Thingy(models.Model):
name = models.CharField(max_length=255)
| gpl-2.0 |
lyapota/m8_sense_lollipop | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
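	# Pixel <-> time conversions: with the default zoom of 0.5, us_to_px(2000)
	# returns 2000 / 1000 * 0.5 == 1.0, so a 2000-unit interval spans one
	# pixel, and px_to_us(1.0) maps that pixel back to 2000 time units.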
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
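		# e.g. with Y_OFFSET=100, RECT_HEIGHT=100 and RECT_SPACE=50, a click
		# at y=260 yields rect == (260 - 100) / 150 == 1 with height == 10.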
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
zanderle/django | django/contrib/gis/gdal/envelope.py | 477 | 7009 | """
The GDAL/OGR library uses an Envelope structure to hold the bounding
box information for a geometry. The envelope (bounding box) contains
two pairs of coordinates, one for the lower left coordinate and one
for the upper right coordinate:
+----------o Upper right; (max_x, max_y)
| |
| |
| |
Lower left (min_x, min_y) o----------+
"""
from ctypes import Structure, c_double
from django.contrib.gis.gdal.error import GDALException
# The OGR definition of an Envelope is a C structure containing four doubles.
# See the 'ogr_core.h' source file for more information:
# http://www.gdal.org/ogr/ogr__core_8h-source.html
class OGREnvelope(Structure):
"Represents the OGREnvelope C Structure."
_fields_ = [("MinX", c_double),
("MaxX", c_double),
("MinY", c_double),
("MaxY", c_double),
]
class Envelope(object):
"""
The Envelope object is a C structure that contains the minimum and
maximum X, Y coordinates for a rectangle bounding box. The naming
of the variables is compatible with the OGR Envelope structure.
"""
def __init__(self, *args):
"""
The initialization function may take an OGREnvelope structure, 4-element
tuple or list, or 4 individual arguments.
"""
if len(args) == 1:
if isinstance(args[0], OGREnvelope):
# OGREnvelope (a ctypes Structure) was passed in.
self._envelope = args[0]
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) != 4:
raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
self._from_sequence(args[0])
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 4:
# Individual parameters passed in.
# Thanks to ww for the help
self._from_sequence([float(a) for a in args])
else:
raise GDALException('Incorrect number (%d) of arguments.' % len(args))
# Checking the x,y coordinates
if self.min_x > self.max_x:
raise GDALException('Envelope minimum X > maximum X.')
if self.min_y > self.max_y:
raise GDALException('Envelope minimum Y > maximum Y.')
def __eq__(self, other):
"""
Returns True if the envelopes are equivalent; can compare against
other Envelopes and 4-tuples.
"""
if isinstance(other, Envelope):
return (self.min_x == other.min_x) and (self.min_y == other.min_y) and \
(self.max_x == other.max_x) and (self.max_y == other.max_y)
elif isinstance(other, tuple) and len(other) == 4:
return (self.min_x == other[0]) and (self.min_y == other[1]) and \
(self.max_x == other[2]) and (self.max_y == other[3])
else:
raise GDALException('Equivalence testing only works with other Envelopes.')
def __str__(self):
"Returns a string representation of the tuple."
return str(self.tuple)
def _from_sequence(self, seq):
"Initializes the C OGR Envelope structure from the given sequence."
self._envelope = OGREnvelope()
self._envelope.MinX = seq[0]
self._envelope.MinY = seq[1]
self._envelope.MaxX = seq[2]
self._envelope.MaxY = seq[3]
def expand_to_include(self, *args):
"""
Modifies the envelope to expand to include the boundaries of
the passed-in 2-tuple (a point), 4-tuple (an extent) or
envelope.
"""
# We provide a number of different signatures for this method,
# and the logic here is all about converting them into a
# 4-tuple single parameter which does the actual work of
# expanding the envelope.
if len(args) == 1:
if isinstance(args[0], Envelope):
return self.expand_to_include(args[0].tuple)
elif hasattr(args[0], 'x') and hasattr(args[0], 'y'):
return self.expand_to_include(args[0].x, args[0].y, args[0].x, args[0].y)
elif isinstance(args[0], (tuple, list)):
# A tuple was passed in.
if len(args[0]) == 2:
return self.expand_to_include((args[0][0], args[0][1], args[0][0], args[0][1]))
elif len(args[0]) == 4:
(minx, miny, maxx, maxy) = args[0]
if minx < self._envelope.MinX:
self._envelope.MinX = minx
if miny < self._envelope.MinY:
self._envelope.MinY = miny
if maxx > self._envelope.MaxX:
self._envelope.MaxX = maxx
if maxy > self._envelope.MaxY:
self._envelope.MaxY = maxy
else:
raise GDALException('Incorrect number of tuple elements (%d).' % len(args[0]))
else:
raise TypeError('Incorrect type of argument: %s' % str(type(args[0])))
elif len(args) == 2:
# An x and an y parameter were passed in
return self.expand_to_include((args[0], args[1], args[0], args[1]))
elif len(args) == 4:
# Individual parameters passed in.
return self.expand_to_include(args)
else:
            raise GDALException('Incorrect number (%d) of arguments.' % len(args))
@property
def min_x(self):
"Returns the value of the minimum X coordinate."
return self._envelope.MinX
@property
def min_y(self):
"Returns the value of the minimum Y coordinate."
return self._envelope.MinY
@property
def max_x(self):
"Returns the value of the maximum X coordinate."
return self._envelope.MaxX
@property
def max_y(self):
"Returns the value of the maximum Y coordinate."
return self._envelope.MaxY
@property
def ur(self):
"Returns the upper-right coordinate."
return (self.max_x, self.max_y)
@property
def ll(self):
"Returns the lower-left coordinate."
return (self.min_x, self.min_y)
@property
def tuple(self):
"Returns a tuple representing the envelope."
return (self.min_x, self.min_y, self.max_x, self.max_y)
@property
def wkt(self):
"Returns WKT representing a Polygon for this envelope."
# TODO: Fix significant figures.
return 'POLYGON((%s %s,%s %s,%s %s,%s %s,%s %s))' % \
(self.min_x, self.min_y, self.min_x, self.max_y,
self.max_x, self.max_y, self.max_x, self.min_y,
self.min_x, self.min_y)
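# A minimal usage sketch (illustrative, not part of the original module); it
# assumes Django is importable so the GDALException import above succeeds:
if __name__ == '__main__':
    env = Envelope(0.0, 0.0, 5.0, 5.0)
    env.expand_to_include(-1.0, 10.0)  # expand to include the point (x, y)
    print(env.tuple)  # (-1.0, 0.0, 5.0, 10.0)
    print(env.wkt)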
| bsd-3-clause |
Hellenn/doctors_joomla | templates/doctors/node_modules/node-gyp/gyp/buildbot/buildbot_run.py | 1467 | 4228 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import os
import shutil
import subprocess
import sys
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
  CallSubProcess(['make', 'cmake'], cwd=CMAKE_DIR)
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
    0 for success, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
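  # e.g. GypTestFormat('ninja') assembles and runs:
  #   python gyp/gyptest.py --all --passed --format ninja \
  #     --path <cmake bin dir> --chdir gyp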
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
if sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| gpl-2.0 |
cloudbase/cinder | cinder/tests/unit/group/test_groups_manager.py | 4 | 31202 | # Copyright (C) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import importutils
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import quota
from cinder import test
from cinder.tests.unit import conf_fixture
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import utils as tests_utils
from cinder.volume import api as volume_api
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume import utils as volutils
GROUP_QUOTAS = quota.GROUP_QUOTAS
CONF = cfg.CONF
class GroupManagerTestCase(test.TestCase):
def setUp(self):
super(GroupManagerTestCase, self).setUp()
self.volume = importutils.import_object(CONF.volume_manager)
self.configuration = mock.Mock(conf.Configuration)
self.context = context.get_admin_context()
self.context.user_id = fake.USER_ID
self.project_id = fake.PROJECT3_ID
self.context.project_id = self.project_id
self.volume.driver.set_initialized()
self.volume.stats = {'allocated_capacity_gb': 0,
'pools': {}}
self.volume_api = volume_api.API()
def test_delete_volume_in_group(self):
"""Test deleting a volume that's tied to a group fails."""
volume_params = {'status': 'available',
'group_id': fake.GROUP_ID}
volume = tests_utils.create_volume(self.context, **volume_params)
self.assertRaises(exception.InvalidVolume,
self.volume_api.delete, self.context, volume)
@mock.patch.object(GROUP_QUOTAS, "reserve",
return_value=["RESERVATION"])
@mock.patch.object(GROUP_QUOTAS, "commit")
@mock.patch.object(GROUP_QUOTAS, "rollback")
@mock.patch.object(driver.VolumeDriver,
"delete_group",
return_value=({'status': (
fields.GroupStatus.DELETED)}, []))
def test_create_delete_group(self, fake_delete_grp,
fake_rollback,
fake_commit, fake_reserve):
"""Test group can be created and deleted."""
def fake_driver_create_grp(context, group):
"""Make sure that the pool is part of the host."""
self.assertIn('host', group)
host = group.host
pool = volutils.extract_host(host, level='pool')
self.assertEqual('fakepool', pool)
return {'status': fields.GroupStatus.AVAILABLE}
self.mock_object(self.volume.driver, 'create_group',
fake_driver_create_grp)
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
host='fakehost@fakedrv#fakepool',
group_type_id=fake.GROUP_TYPE_ID)
group = objects.Group.get_by_id(self.context, group.id)
self.assertEqual(0, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.create_group(self.context, group)
self.assertEqual(2, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[0]
self.assertEqual('group.create.start', msg['event_type'])
expected = {
'status': fields.GroupStatus.AVAILABLE,
'name': 'test_group',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'user_id': fake.USER_ID,
'group_id': group.id,
'group_type': fake.GROUP_TYPE_ID
}
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[1]
self.assertEqual('group.create.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
self.assertEqual(
group.id,
objects.Group.get_by_id(context.get_admin_context(),
group.id).id)
self.volume.delete_group(self.context, group)
grp = objects.Group.get_by_id(
context.get_admin_context(read_deleted='yes'), group.id)
self.assertEqual(fields.GroupStatus.DELETED, grp.status)
self.assertEqual(4, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[2]
self.assertEqual('group.delete.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[3]
self.assertEqual('group.delete.end', msg['event_type'])
expected['status'] = fields.GroupStatus.DELETED
self.assertDictMatch(expected, msg['payload'])
self.assertRaises(exception.NotFound,
objects.Group.get_by_id,
self.context,
group.id)
@mock.patch.object(GROUP_QUOTAS, "reserve",
return_value=["RESERVATION"])
@mock.patch.object(GROUP_QUOTAS, "commit")
@mock.patch.object(GROUP_QUOTAS, "rollback")
@mock.patch.object(driver.VolumeDriver,
"create_group",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"update_group")
def test_update_group(self, fake_update_grp,
fake_create_grp, fake_rollback,
fake_commit, fake_reserve):
"""Test group can be updated."""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
self.volume.create_group(self.context, group)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
volume_type_id=fake.VOLUME_TYPE_ID,
status='available',
host=group.host)
self.volume.create_volume(self.context, volume)
volume2 = tests_utils.create_volume(
self.context,
group_id=None,
volume_type_id=fake.VOLUME_TYPE_ID,
status='available',
host=group.host)
self.volume.create_volume(self.context, volume)
fake_update_grp.return_value = (
{'status': fields.GroupStatus.AVAILABLE},
[{'id': volume2.id, 'status': 'available'}],
[{'id': volume.id, 'status': 'available'}])
self.volume.update_group(self.context, group,
add_volumes=volume2.id,
remove_volumes=volume.id)
grp = objects.Group.get_by_id(self.context, group.id)
expected = {
'status': fields.GroupStatus.AVAILABLE,
'name': 'test_group',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'user_id': fake.USER_ID,
'group_id': group.id,
'group_type': fake.GROUP_TYPE_ID
}
self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status)
self.assertEqual(10, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[6]
self.assertEqual('group.update.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[8]
self.assertEqual('group.update.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
grpvolumes = db.volume_get_all_by_generic_group(self.context, group.id)
grpvol_ids = [grpvol['id'] for grpvol in grpvolumes]
# Verify volume is removed.
self.assertNotIn(volume.id, grpvol_ids)
# Verify volume is added.
self.assertIn(volume2.id, grpvol_ids)
volume3 = tests_utils.create_volume(
self.context,
group_id=None,
host=group.host,
volume_type_id=fake.VOLUME_TYPE_ID,
status='wrong-status')
volume_id3 = volume3['id']
volume_get_orig = self.volume.db.volume_get
self.volume.db.volume_get = mock.Mock(
return_value={'status': 'wrong_status',
'id': volume_id3})
# Try to add a volume in wrong status
self.assertRaises(exception.InvalidVolume,
self.volume.update_group,
self.context,
group,
add_volumes=volume_id3,
remove_volumes=None)
self.volume.db.volume_get.reset_mock()
self.volume.db.volume_get = volume_get_orig
@mock.patch.object(driver.VolumeDriver,
"create_group",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"delete_group",
return_value=({'status': 'deleted'}, []))
@mock.patch.object(driver.VolumeDriver,
"create_group_snapshot",
return_value={'status': 'available'})
@mock.patch.object(driver.VolumeDriver,
"delete_group_snapshot",
return_value=({'status': 'deleted'}, []))
@mock.patch.object(driver.VolumeDriver,
"create_group_from_src",
return_value=(None, None))
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_volume_from_snapshot')
@mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.'
'create_cloned_volume')
def test_create_group_from_src(self,
mock_create_cloned_vol,
mock_create_vol_from_snap,
mock_create_from_src,
mock_delete_grpsnap,
mock_create_grpsnap,
mock_delete_grp,
mock_create_grp):
"""Test group can be created and deleted."""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
status=fields.GroupStatus.AVAILABLE,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
status='available',
host=group.host,
volume_type_id=fake.VOLUME_TYPE_ID,
size=1)
volume_id = volume['id']
group_snapshot_returns = self._create_group_snapshot(group.id,
[volume_id])
group_snapshot = group_snapshot_returns[0]
snapshot_id = group_snapshot_returns[1][0]['id']
# Create group from source group snapshot.
group2 = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
group_snapshot_id=group_snapshot.id,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
group2 = objects.Group.get_by_id(self.context, group2.id)
volume2 = tests_utils.create_volume(
self.context,
group_id=group2.id,
snapshot_id=snapshot_id,
status='available',
host=group2.host,
volume_type_id=fake.VOLUME_TYPE_ID)
self.volume.create_volume(self.context, volume2)
self.volume.create_group_from_src(
self.context, group2, group_snapshot=group_snapshot)
grp2 = objects.Group.get_by_id(self.context, group2.id)
expected = {
'status': fields.GroupStatus.AVAILABLE,
'name': 'test_group',
'availability_zone': 'nova',
'tenant_id': self.context.project_id,
'created_at': 'DONTCARE',
'user_id': fake.USER_ID,
'group_id': group2.id,
'group_type': fake.GROUP_TYPE_ID,
}
self.assertEqual(fields.GroupStatus.AVAILABLE, grp2.status)
self.assertEqual(group2.id, grp2['id'])
self.assertEqual(group_snapshot.id, grp2['group_snapshot_id'])
self.assertIsNone(grp2['source_group_id'])
msg = self.notifier.notifications[2]
self.assertEqual('group.create.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[4]
self.assertEqual('group.create.end', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
if len(self.notifier.notifications) > 6:
self.assertFalse(self.notifier.notifications[6],
self.notifier.notifications)
self.assertEqual(6, len(self.notifier.notifications),
self.notifier.notifications)
self.volume.delete_group(self.context, group2)
if len(self.notifier.notifications) > 9:
self.assertFalse(self.notifier.notifications[10],
self.notifier.notifications)
self.assertEqual(9, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[6]
self.assertEqual('group.delete.start', msg['event_type'])
expected['status'] = fields.GroupStatus.AVAILABLE
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[8]
self.assertEqual('group.delete.end', msg['event_type'])
expected['status'] = fields.GroupStatus.DELETED
self.assertDictMatch(expected, msg['payload'])
grp2 = objects.Group.get_by_id(
context.get_admin_context(read_deleted='yes'), group2.id)
self.assertEqual(fields.GroupStatus.DELETED, grp2.status)
self.assertRaises(exception.NotFound,
objects.Group.get_by_id,
self.context,
group2.id)
# Create group from source group
group3 = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
source_group_id=group.id,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
volume3 = tests_utils.create_volume(
self.context,
group_id=group3.id,
source_volid=volume_id,
status='available',
host=group3.host,
volume_type_id=fake.VOLUME_TYPE_ID)
self.volume.create_volume(self.context, volume3)
self.volume.create_group_from_src(
self.context, group3, source_group=group)
grp3 = objects.Group.get_by_id(self.context, group3.id)
self.assertEqual(fields.GroupStatus.AVAILABLE, grp3.status)
self.assertEqual(group3.id, grp3.id)
self.assertEqual(group.id, grp3.source_group_id)
self.assertIsNone(grp3.group_snapshot_id)
self.volume.delete_group_snapshot(self.context, group_snapshot)
self.volume.delete_group(self.context, group)
def test_sort_snapshots(self):
vol1 = {'id': fake.VOLUME_ID, 'name': 'volume 1',
'snapshot_id': fake.SNAPSHOT_ID,
'group_id': fake.GROUP_ID}
vol2 = {'id': fake.VOLUME2_ID, 'name': 'volume 2',
'snapshot_id': fake.SNAPSHOT2_ID,
'group_id': fake.GROUP_ID}
vol3 = {'id': fake.VOLUME3_ID, 'name': 'volume 3',
'snapshot_id': fake.SNAPSHOT3_ID,
'group_id': fake.GROUP_ID}
snp1 = {'id': fake.SNAPSHOT_ID, 'name': 'snap 1',
'group_snapshot_id': fake.GROUP_ID}
snp2 = {'id': fake.SNAPSHOT2_ID, 'name': 'snap 2',
'group_snapshot_id': fake.GROUP_ID}
snp3 = {'id': fake.SNAPSHOT3_ID, 'name': 'snap 3',
'group_snapshot_id': fake.GROUP_ID}
snp1_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp1)
snp2_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp2)
snp3_obj = fake_snapshot.fake_snapshot_obj(self.context, **snp3)
volumes = []
snapshots = []
volumes.append(vol1)
volumes.append(vol2)
volumes.append(vol3)
snapshots.append(snp2_obj)
snapshots.append(snp3_obj)
snapshots.append(snp1_obj)
i = 0
for vol in volumes:
snap = snapshots[i]
i += 1
self.assertNotEqual(vol['snapshot_id'], snap.id)
sorted_snaps = self.volume._sort_snapshots(volumes, snapshots)
i = 0
for vol in volumes:
snap = sorted_snaps[i]
i += 1
self.assertEqual(vol['snapshot_id'], snap.id)
snapshots[2]['id'] = fake.WILL_NOT_BE_FOUND_ID
self.assertRaises(exception.SnapshotNotFound,
self.volume._sort_snapshots,
volumes, snapshots)
self.assertRaises(exception.InvalidInput,
self.volume._sort_snapshots,
volumes, [])
def test_sort_source_vols(self):
vol1 = {'id': '1', 'name': 'volume 1',
'source_volid': '1',
'group_id': '2'}
vol2 = {'id': '2', 'name': 'volume 2',
'source_volid': '2',
'group_id': '2'}
vol3 = {'id': '3', 'name': 'volume 3',
'source_volid': '3',
'group_id': '2'}
src_vol1 = {'id': '1', 'name': 'source vol 1',
'group_id': '1'}
src_vol2 = {'id': '2', 'name': 'source vol 2',
'group_id': '1'}
src_vol3 = {'id': '3', 'name': 'source vol 3',
'group_id': '1'}
volumes = []
src_vols = []
volumes.append(vol1)
volumes.append(vol2)
volumes.append(vol3)
src_vols.append(src_vol2)
src_vols.append(src_vol3)
src_vols.append(src_vol1)
i = 0
for vol in volumes:
src_vol = src_vols[i]
i += 1
self.assertNotEqual(vol['source_volid'], src_vol['id'])
sorted_src_vols = self.volume._sort_source_vols(volumes, src_vols)
i = 0
for vol in volumes:
src_vol = sorted_src_vols[i]
i += 1
self.assertEqual(vol['source_volid'], src_vol['id'])
src_vols[2]['id'] = '9999'
self.assertRaises(exception.VolumeNotFound,
self.volume._sort_source_vols,
volumes, src_vols)
self.assertRaises(exception.InvalidInput,
self.volume._sort_source_vols,
volumes, [])
def _create_group_snapshot(self, group_id, volume_ids, size='0'):
"""Create a group_snapshot object."""
grpsnap = objects.GroupSnapshot(self.context)
grpsnap.user_id = fake.USER_ID
grpsnap.project_id = fake.PROJECT_ID
grpsnap.group_id = group_id
grpsnap.status = fields.GroupStatus.CREATING
grpsnap.create()
# Create snapshot list
        snaps = []
        for volume_id in volume_ids:
snap = objects.Snapshot(context.get_admin_context())
snap.volume_size = size
snap.user_id = fake.USER_ID
snap.project_id = fake.PROJECT_ID
snap.volume_id = volume_id
snap.status = fields.SnapshotStatus.AVAILABLE
snap.group_snapshot_id = grpsnap.id
snap.create()
snaps.append(snap)
return grpsnap, snaps
@mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
@mock.patch('cinder.volume.driver.VolumeDriver.create_group',
autospec=True,
return_value={'status': 'available'})
@mock.patch('cinder.volume.driver.VolumeDriver.delete_group',
autospec=True,
return_value=({'status': 'deleted'}, []))
@mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot',
autospec=True,
return_value=({'status': 'available'}, []))
@mock.patch('cinder.volume.driver.VolumeDriver.delete_group_snapshot',
autospec=True,
return_value=({'status': 'deleted'}, []))
def test_create_delete_group_snapshot(self,
mock_del_grpsnap,
mock_create_grpsnap,
mock_del_grp,
_mock_create_grp,
mock_notify):
"""Test group_snapshot can be created and deleted."""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
host=group.host,
volume_type_id=fake.VOLUME_TYPE_ID)
self.volume.create_volume(self.context, volume)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end']))
group_snapshot_returns = self._create_group_snapshot(group.id,
[volume.id])
group_snapshot = group_snapshot_returns[0]
self.volume.create_group_snapshot(self.context, group_snapshot)
self.assertEqual(group_snapshot.id,
objects.GroupSnapshot.get_by_id(
context.get_admin_context(),
group_snapshot.id).id)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end'],
['INFO', 'group_snapshot.create.start'],
['INFO', 'snapshot.create.start'],
['INFO', 'group_snapshot.create.end'],
['INFO', 'snapshot.create.end']))
self.volume.delete_group_snapshot(self.context, group_snapshot)
self.assert_notify_called(mock_notify,
(['INFO', 'volume.create.start'],
['INFO', 'volume.create.end'],
['INFO', 'group_snapshot.create.start'],
['INFO', 'snapshot.create.start'],
['INFO', 'group_snapshot.create.end'],
['INFO', 'snapshot.create.end'],
['INFO', 'group_snapshot.delete.start'],
['INFO', 'snapshot.delete.start'],
['INFO', 'group_snapshot.delete.end'],
['INFO', 'snapshot.delete.end']))
grpsnap = objects.GroupSnapshot.get_by_id(
context.get_admin_context(read_deleted='yes'),
group_snapshot.id)
self.assertEqual('deleted', grpsnap.status)
self.assertRaises(exception.NotFound,
objects.GroupSnapshot.get_by_id,
self.context,
group_snapshot.id)
self.volume.delete_group(self.context, group)
self.assertTrue(mock_create_grpsnap.called)
self.assertTrue(mock_del_grpsnap.called)
self.assertTrue(mock_del_grp.called)
@mock.patch('cinder.volume.driver.VolumeDriver.create_group',
return_value={'status': 'available'})
@mock.patch('cinder.volume.driver.VolumeDriver.delete_group',
return_value=({'status': 'deleted'}, []))
def test_delete_group_correct_host(self,
mock_del_grp,
_mock_create_grp):
"""Test group can be deleted.
Test group can be deleted when volumes are on
the correct volume node.
"""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
host='host1@backend1#pool1',
status='creating',
volume_type_id=fake.VOLUME_TYPE_ID,
size=1)
self.volume.host = 'host1@backend1'
self.volume.create_volume(self.context, volume)
self.volume.delete_group(self.context, group)
grp = objects.Group.get_by_id(
context.get_admin_context(read_deleted='yes'),
group.id)
self.assertEqual(fields.GroupStatus.DELETED, grp.status)
self.assertRaises(exception.NotFound,
objects.Group.get_by_id,
self.context,
group.id)
self.assertTrue(mock_del_grp.called)
@mock.patch('cinder.volume.driver.VolumeDriver.create_group',
return_value={'status': 'available'})
def test_delete_group_wrong_host(self, *_mock_create_grp):
"""Test group cannot be deleted.
Test group cannot be deleted when volumes in the
group are not local to the volume node.
"""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
host='host1@backend1#pool1',
status='creating',
volume_type_id=fake.VOLUME_TYPE_ID,
size=1)
self.volume.host = 'host1@backend2'
self.volume.create_volume(self.context, volume)
self.assertRaises(exception.InvalidVolume,
self.volume.delete_group,
self.context,
group)
grp = objects.Group.get_by_id(self.context, group.id)
# Group is not deleted
self.assertEqual(fields.GroupStatus.AVAILABLE, grp.status)
def test_create_volume_with_group_invalid_type(self):
"""Test volume creation with group & invalid volume type."""
vol_type = db.volume_type_create(
context.get_admin_context(),
dict(name=conf_fixture.def_vol_type, extra_specs={})
)
db_vol_type = db.volume_type_get(context.get_admin_context(),
vol_type.id)
grp = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
status=fields.GroupStatus.AVAILABLE,
volume_type_ids=[db_vol_type['id']],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
fake_type = {
'id': '9999',
'name': 'fake',
}
# Volume type must be provided when creating a volume in a
# group.
self.assertRaises(exception.InvalidInput,
self.volume_api.create,
self.context, 1, 'vol1', 'volume 1',
group=grp)
# Volume type must be valid.
self.assertRaises(exception.InvalidInput,
self.volume_api.create,
self.context, 1, 'vol1', 'volume 1',
volume_type=fake_type,
group=grp)
@mock.patch('cinder.volume.driver.VolumeDriver.create_group_snapshot',
autospec=True,
return_value=({'status': 'available'}, []))
def test_create_group_snapshot_with_bootable_volumes(self,
mock_create_grpsnap):
"""Test group_snapshot can be created and deleted."""
group = tests_utils.create_group(
self.context,
availability_zone=CONF.storage_availability_zone,
volume_type_ids=[fake.VOLUME_TYPE_ID],
group_type_id=fake.GROUP_TYPE_ID,
host=CONF.host)
volume = tests_utils.create_volume(
self.context,
group_id=group.id,
host=group.host,
volume_type_id=fake.VOLUME_TYPE_ID)
self.volume.create_volume(self.context, volume)
# Create a bootable volume
bootable_vol_params = {'status': 'creating', 'host': CONF.host,
'size': 1, 'bootable': True}
bootable_vol = tests_utils.create_volume(self.context,
group_id=group.id,
**bootable_vol_params)
# Create a common volume
self.volume.create_volume(self.context, bootable_vol)
volume_ids = [volume.id, bootable_vol.id]
group_snapshot_returns = self._create_group_snapshot(group.id,
volume_ids)
group_snapshot = group_snapshot_returns[0]
self.volume.create_group_snapshot(self.context, group_snapshot)
self.assertEqual(group_snapshot.id,
objects.GroupSnapshot.get_by_id(
context.get_admin_context(),
group_snapshot.id).id)
self.assertTrue(mock_create_grpsnap.called)
| apache-2.0 |
CTSRD-SOAAP/chromium-42.0.2311.135 | build/android/pylib/valgrind_tools.py | 40 | 9131 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Classes in this file define additional actions that need to be taken to run a
test under some kind of runtime error detection tool.
The interface is intended to be used as follows.
1. For tests that simply run a native process (i.e. no activity is spawned):
Call tool.CopyFiles(device).
Prepend test command line with tool.GetTestWrapper().
2. For tests that spawn an activity:
Call tool.CopyFiles(device).
Call tool.SetupEnvironment().
Run the test as usual.
Call tool.CleanUpEnvironment().
"""
# pylint: disable=R0201
import glob
import logging
import os.path
import subprocess
import sys
from pylib.constants import DIR_SOURCE_ROOT
from pylib.device import device_errors
def SetChromeTimeoutScale(device, scale):
"""Sets the timeout scale in /data/local/tmp/chrome_timeout_scale to scale."""
path = '/data/local/tmp/chrome_timeout_scale'
if not scale or scale == 1.0:
# Delete if scale is None/0.0/1.0 since the default timeout scale is 1.0
device.RunShellCommand('rm %s' % path)
else:
device.WriteFile(path, '%f' % scale, as_root=True)
class BaseTool(object):
"""A tool that does nothing."""
def __init__(self):
"""Does nothing."""
pass
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ''
def GetUtilWrapper(self):
"""Returns the wrapper name for the utilities.
Returns:
A string that is to be prepended to the command line of utility
processes (forwarder, etc.).
"""
return ''
@classmethod
def CopyFiles(cls, device):
"""Copies tool-specific files to the device, create directories, etc."""
pass
def SetupEnvironment(self):
"""Sets up the system environment for a test.
This is a good place to set system properties.
"""
pass
def CleanUpEnvironment(self):
"""Cleans up environment."""
pass
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 1.0
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns:
True if this tool can not work with stripped binaries.
"""
return False
class AddressSanitizerTool(BaseTool):
"""AddressSanitizer tool."""
WRAPPER_NAME = '/system/bin/asanwrapper'
  # Disable memcmp overlap check. There are blobs (gl drivers)
# on some android devices that use memcmp on overlapping regions,
# nothing we can do about that.
EXTRA_OPTIONS = 'strict_memcmp=0,use_sigaltstack=1'
def __init__(self, device):
super(AddressSanitizerTool, self).__init__()
self._device = device
# Configure AndroidCommands to run utils (such as md5sum_bin) under ASan.
# This is required because ASan is a compiler-based tool, and md5sum
# includes instrumented code from base.
device.old_interface.SetUtilWrapper(self.GetUtilWrapper())
@classmethod
def CopyFiles(cls, device):
"""Copies ASan tools to the device."""
libs = glob.glob(os.path.join(DIR_SOURCE_ROOT,
'third_party/llvm-build/Release+Asserts/',
'lib/clang/*/lib/linux/',
'libclang_rt.asan-arm-android.so'))
assert len(libs) == 1
subprocess.call(
[os.path.join(
DIR_SOURCE_ROOT,
'tools/android/asan/third_party/asan_device_setup.sh'),
'--device', str(device),
'--lib', libs[0],
'--extra-options', AddressSanitizerTool.EXTRA_OPTIONS])
device.WaitUntilFullyBooted()
def GetTestWrapper(self):
return AddressSanitizerTool.WRAPPER_NAME
def GetUtilWrapper(self):
"""Returns the wrapper for utilities, such as forwarder.
AddressSanitizer wrapper must be added to all instrumented binaries,
including forwarder and the like. This can be removed if such binaries
were built without instrumentation. """
return self.GetTestWrapper()
def SetupEnvironment(self):
try:
self._device.EnableRoot()
except device_errors.CommandFailedError as e:
# Try to set the timeout scale anyway.
# TODO(jbudorick) Handle this exception appropriately after interface
# conversions are finished.
logging.error(str(e))
SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
def CleanUpEnvironment(self):
SetChromeTimeoutScale(self._device, None)
def GetTimeoutScale(self):
# Very slow startup.
return 20.0
class ValgrindTool(BaseTool):
"""Base abstract class for Valgrind tools."""
VG_DIR = '/data/local/tmp/valgrind'
VGLOGS_DIR = '/data/local/tmp/vglogs'
def __init__(self, device):
super(ValgrindTool, self).__init__()
self._device = device
# exactly 31 chars, SystemProperties::PROP_NAME_MAX
self._wrap_properties = ['wrap.com.google.android.apps.ch',
'wrap.org.chromium.native_test']
@classmethod
def CopyFiles(cls, device):
"""Copies Valgrind tools to the device."""
device.RunShellCommand(
'rm -r %s; mkdir %s' % (ValgrindTool.VG_DIR, ValgrindTool.VG_DIR))
device.RunShellCommand(
'rm -r %s; mkdir %s' % (ValgrindTool.VGLOGS_DIR,
ValgrindTool.VGLOGS_DIR))
files = cls.GetFilesForTool()
    device.PushChangedFiles(
        [(os.path.join(DIR_SOURCE_ROOT, f),
          os.path.join(ValgrindTool.VG_DIR, os.path.basename(f)))
         for f in files])
def SetupEnvironment(self):
"""Sets up device environment."""
self._device.RunShellCommand('chmod 777 /data/local/tmp')
self._device.RunShellCommand('setenforce 0')
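    # Android starts a process through the command stored in the matching
    # wrap.<process name> system property (when set); the setprop calls below
    # are what inject the Valgrind wrapper around the app under test.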
for prop in self._wrap_properties:
self._device.RunShellCommand(
'setprop %s "logwrapper %s"' % (prop, self.GetTestWrapper()))
SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
def CleanUpEnvironment(self):
"""Cleans up device environment."""
for prop in self._wrap_properties:
self._device.RunShellCommand('setprop %s ""' % (prop,))
SetChromeTimeoutScale(self._device, None)
@staticmethod
def GetFilesForTool():
"""Returns a list of file names for the tool."""
raise NotImplementedError()
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns:
True if this tool can not work with stripped binaries.
"""
return True
class MemcheckTool(ValgrindTool):
"""Memcheck tool."""
def __init__(self, device):
super(MemcheckTool, self).__init__(device)
@staticmethod
def GetFilesForTool():
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper.sh',
'tools/valgrind/memcheck/suppressions.txt',
'tools/valgrind/memcheck/suppressions_android.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30
class TSanTool(ValgrindTool):
"""ThreadSanitizer tool. See http://code.google.com/p/data-race-test ."""
def __init__(self, device):
super(TSanTool, self).__init__(device)
@staticmethod
def GetFilesForTool():
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',
'tools/valgrind/tsan/suppressions.txt',
'tools/valgrind/tsan/suppressions_android.txt',
'tools/valgrind/tsan/ignores.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper-tsan.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30.0
TOOL_REGISTRY = {
'memcheck': MemcheckTool,
'memcheck-renderer': MemcheckTool,
'tsan': TSanTool,
'tsan-renderer': TSanTool,
'asan': AddressSanitizerTool,
}
def CreateTool(tool_name, device):
"""Creates a tool with the specified tool name.
Args:
tool_name: Name of the tool to create.
device: A DeviceUtils instance.
Returns:
A tool for the specified tool_name.
"""
if not tool_name:
return BaseTool()
ctor = TOOL_REGISTRY.get(tool_name)
if ctor:
return ctor(device)
else:
print 'Unknown tool %s, available tools: %s' % (
tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
sys.exit(1)
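# Illustrative flow (not part of the original file): a test runner would do
#   tool = CreateTool('asan', device)
#   tool.SetupEnvironment()
#   ... run tests, prefixing native commands with tool.GetTestWrapper() ...
#   tool.CleanUpEnvironment()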
def PushFilesForTool(tool_name, device):
"""Pushes the files required for |tool_name| to |device|.
Args:
tool_name: Name of the tool to create.
device: A DeviceUtils instance.
"""
if not tool_name:
return
clazz = TOOL_REGISTRY.get(tool_name)
if clazz:
clazz.CopyFiles(device)
else:
print 'Unknown tool %s, available tools: %s' % (
tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
sys.exit(1)
| bsd-3-clause |
globau/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/testdata/handlers/blank_wsh.py | 499 | 1557 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# intentionally left blank
| mpl-2.0 |
wbc2010/django1.2.5 | django/core/mail/backends/filebased.py | 394 | 2485 | """Email backend that writes messages to a file."""
import datetime
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail.backends.console import EmailBackend as ConsoleEmailBackend
class EmailBackend(ConsoleEmailBackend):
def __init__(self, *args, **kwargs):
self._fname = None
if 'file_path' in kwargs:
self.file_path = kwargs.pop('file_path')
else:
            self.file_path = getattr(settings, 'EMAIL_FILE_PATH', None)
# Make sure self.file_path is a string.
if not isinstance(self.file_path, basestring):
raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path)
self.file_path = os.path.abspath(self.file_path)
        # Make sure that self.file_path is a directory, if it exists.
if os.path.exists(self.file_path) and not os.path.isdir(self.file_path):
raise ImproperlyConfigured('Path for saving email messages exists, but is not a directory: %s' % self.file_path)
        # Try to create it, if it does not exist.
elif not os.path.exists(self.file_path):
try:
os.makedirs(self.file_path)
except OSError, err:
raise ImproperlyConfigured('Could not create directory for saving email messages: %s (%s)' % (self.file_path, err))
# Make sure that self.file_path is writable.
if not os.access(self.file_path, os.W_OK):
raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path)
# Finally, call super().
# Since we're using the console-based backend as a base,
# force the stream to be None, so we don't default to stdout
kwargs['stream'] = None
super(EmailBackend, self).__init__(*args, **kwargs)
def _get_filename(self):
"""Return a unique file name."""
if self._fname is None:
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
fname = "%s-%s.log" % (timestamp, abs(id(self)))
self._fname = os.path.join(self.file_path, fname)
return self._fname
def open(self):
if self.stream is None:
self.stream = open(self._get_filename(), 'a')
return True
return False
def close(self):
try:
if self.stream is not None:
self.stream.close()
finally:
self.stream = None
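# A minimal configuration sketch (illustrative): point Django at this backend
# in settings.py and every message sent through django.core.mail is appended
# to a timestamped .log file instead of being delivered:
#
#   EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
#   EMAIL_FILE_PATH = '/tmp/app-messages'  # hypothetical path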
| bsd-3-clause |
brainelectronics/towerdefense | _build/lib/pyglet/gl/glu.py | 45 | 25679 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for /usr/include/GL/glu.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
from pyglet.gl.lib import link_GLU as _link_function
from pyglet.gl.lib import c_ptrdiff_t
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for /usr/include/GL/glu.h
GLU_EXT_object_space_tess = 1 # /usr/include/GL/glu.h:71
GLU_EXT_nurbs_tessellator = 1 # /usr/include/GL/glu.h:72
GLU_FALSE = 0 # /usr/include/GL/glu.h:75
GLU_TRUE = 1 # /usr/include/GL/glu.h:76
GLU_VERSION_1_1 = 1 # /usr/include/GL/glu.h:79
GLU_VERSION_1_2 = 1 # /usr/include/GL/glu.h:80
GLU_VERSION_1_3 = 1 # /usr/include/GL/glu.h:81
GLU_VERSION = 100800 # /usr/include/GL/glu.h:84
GLU_EXTENSIONS = 100801 # /usr/include/GL/glu.h:85
GLU_INVALID_ENUM = 100900 # /usr/include/GL/glu.h:88
GLU_INVALID_VALUE = 100901 # /usr/include/GL/glu.h:89
GLU_OUT_OF_MEMORY = 100902 # /usr/include/GL/glu.h:90
GLU_INCOMPATIBLE_GL_VERSION = 100903 # /usr/include/GL/glu.h:91
GLU_INVALID_OPERATION = 100904 # /usr/include/GL/glu.h:92
GLU_OUTLINE_POLYGON = 100240 # /usr/include/GL/glu.h:96
GLU_OUTLINE_PATCH = 100241 # /usr/include/GL/glu.h:97
GLU_NURBS_ERROR = 100103 # /usr/include/GL/glu.h:100
GLU_ERROR = 100103 # /usr/include/GL/glu.h:101
GLU_NURBS_BEGIN = 100164 # /usr/include/GL/glu.h:102
GLU_NURBS_BEGIN_EXT = 100164 # /usr/include/GL/glu.h:103
GLU_NURBS_VERTEX = 100165 # /usr/include/GL/glu.h:104
GLU_NURBS_VERTEX_EXT = 100165 # /usr/include/GL/glu.h:105
GLU_NURBS_NORMAL = 100166 # /usr/include/GL/glu.h:106
GLU_NURBS_NORMAL_EXT = 100166 # /usr/include/GL/glu.h:107
GLU_NURBS_COLOR = 100167 # /usr/include/GL/glu.h:108
GLU_NURBS_COLOR_EXT = 100167 # /usr/include/GL/glu.h:109
GLU_NURBS_TEXTURE_COORD = 100168 # /usr/include/GL/glu.h:110
GLU_NURBS_TEX_COORD_EXT = 100168 # /usr/include/GL/glu.h:111
GLU_NURBS_END = 100169 # /usr/include/GL/glu.h:112
GLU_NURBS_END_EXT = 100169 # /usr/include/GL/glu.h:113
GLU_NURBS_BEGIN_DATA = 100170 # /usr/include/GL/glu.h:114
GLU_NURBS_BEGIN_DATA_EXT = 100170 # /usr/include/GL/glu.h:115
GLU_NURBS_VERTEX_DATA = 100171 # /usr/include/GL/glu.h:116
GLU_NURBS_VERTEX_DATA_EXT = 100171 # /usr/include/GL/glu.h:117
GLU_NURBS_NORMAL_DATA = 100172 # /usr/include/GL/glu.h:118
GLU_NURBS_NORMAL_DATA_EXT = 100172 # /usr/include/GL/glu.h:119
GLU_NURBS_COLOR_DATA = 100173 # /usr/include/GL/glu.h:120
GLU_NURBS_COLOR_DATA_EXT = 100173 # /usr/include/GL/glu.h:121
GLU_NURBS_TEXTURE_COORD_DATA = 100174 # /usr/include/GL/glu.h:122
GLU_NURBS_TEX_COORD_DATA_EXT = 100174 # /usr/include/GL/glu.h:123
GLU_NURBS_END_DATA = 100175 # /usr/include/GL/glu.h:124
GLU_NURBS_END_DATA_EXT = 100175 # /usr/include/GL/glu.h:125
GLU_NURBS_ERROR1 = 100251 # /usr/include/GL/glu.h:128
GLU_NURBS_ERROR2 = 100252 # /usr/include/GL/glu.h:129
GLU_NURBS_ERROR3 = 100253 # /usr/include/GL/glu.h:130
GLU_NURBS_ERROR4 = 100254 # /usr/include/GL/glu.h:131
GLU_NURBS_ERROR5 = 100255 # /usr/include/GL/glu.h:132
GLU_NURBS_ERROR6 = 100256 # /usr/include/GL/glu.h:133
GLU_NURBS_ERROR7 = 100257 # /usr/include/GL/glu.h:134
GLU_NURBS_ERROR8 = 100258 # /usr/include/GL/glu.h:135
GLU_NURBS_ERROR9 = 100259 # /usr/include/GL/glu.h:136
GLU_NURBS_ERROR10 = 100260 # /usr/include/GL/glu.h:137
GLU_NURBS_ERROR11 = 100261 # /usr/include/GL/glu.h:138
GLU_NURBS_ERROR12 = 100262 # /usr/include/GL/glu.h:139
GLU_NURBS_ERROR13 = 100263 # /usr/include/GL/glu.h:140
GLU_NURBS_ERROR14 = 100264 # /usr/include/GL/glu.h:141
GLU_NURBS_ERROR15 = 100265 # /usr/include/GL/glu.h:142
GLU_NURBS_ERROR16 = 100266 # /usr/include/GL/glu.h:143
GLU_NURBS_ERROR17 = 100267 # /usr/include/GL/glu.h:144
GLU_NURBS_ERROR18 = 100268 # /usr/include/GL/glu.h:145
GLU_NURBS_ERROR19 = 100269 # /usr/include/GL/glu.h:146
GLU_NURBS_ERROR20 = 100270 # /usr/include/GL/glu.h:147
GLU_NURBS_ERROR21 = 100271 # /usr/include/GL/glu.h:148
GLU_NURBS_ERROR22 = 100272 # /usr/include/GL/glu.h:149
GLU_NURBS_ERROR23 = 100273 # /usr/include/GL/glu.h:150
GLU_NURBS_ERROR24 = 100274 # /usr/include/GL/glu.h:151
GLU_NURBS_ERROR25 = 100275 # /usr/include/GL/glu.h:152
GLU_NURBS_ERROR26 = 100276 # /usr/include/GL/glu.h:153
GLU_NURBS_ERROR27 = 100277 # /usr/include/GL/glu.h:154
GLU_NURBS_ERROR28 = 100278 # /usr/include/GL/glu.h:155
GLU_NURBS_ERROR29 = 100279 # /usr/include/GL/glu.h:156
GLU_NURBS_ERROR30 = 100280 # /usr/include/GL/glu.h:157
GLU_NURBS_ERROR31 = 100281 # /usr/include/GL/glu.h:158
GLU_NURBS_ERROR32 = 100282 # /usr/include/GL/glu.h:159
GLU_NURBS_ERROR33 = 100283 # /usr/include/GL/glu.h:160
GLU_NURBS_ERROR34 = 100284 # /usr/include/GL/glu.h:161
GLU_NURBS_ERROR35 = 100285 # /usr/include/GL/glu.h:162
GLU_NURBS_ERROR36 = 100286 # /usr/include/GL/glu.h:163
GLU_NURBS_ERROR37 = 100287 # /usr/include/GL/glu.h:164
GLU_AUTO_LOAD_MATRIX = 100200 # /usr/include/GL/glu.h:167
GLU_CULLING = 100201 # /usr/include/GL/glu.h:168
GLU_SAMPLING_TOLERANCE = 100203 # /usr/include/GL/glu.h:169
GLU_DISPLAY_MODE = 100204 # /usr/include/GL/glu.h:170
GLU_PARAMETRIC_TOLERANCE = 100202 # /usr/include/GL/glu.h:171
GLU_SAMPLING_METHOD = 100205 # /usr/include/GL/glu.h:172
GLU_U_STEP = 100206 # /usr/include/GL/glu.h:173
GLU_V_STEP = 100207 # /usr/include/GL/glu.h:174
GLU_NURBS_MODE = 100160 # /usr/include/GL/glu.h:175
GLU_NURBS_MODE_EXT = 100160 # /usr/include/GL/glu.h:176
GLU_NURBS_TESSELLATOR = 100161 # /usr/include/GL/glu.h:177
GLU_NURBS_TESSELLATOR_EXT = 100161 # /usr/include/GL/glu.h:178
GLU_NURBS_RENDERER = 100162 # /usr/include/GL/glu.h:179
GLU_NURBS_RENDERER_EXT = 100162 # /usr/include/GL/glu.h:180
GLU_OBJECT_PARAMETRIC_ERROR = 100208 # /usr/include/GL/glu.h:183
GLU_OBJECT_PARAMETRIC_ERROR_EXT = 100208 # /usr/include/GL/glu.h:184
GLU_OBJECT_PATH_LENGTH = 100209 # /usr/include/GL/glu.h:185
GLU_OBJECT_PATH_LENGTH_EXT = 100209 # /usr/include/GL/glu.h:186
GLU_PATH_LENGTH = 100215 # /usr/include/GL/glu.h:187
GLU_PARAMETRIC_ERROR = 100216 # /usr/include/GL/glu.h:188
GLU_DOMAIN_DISTANCE = 100217 # /usr/include/GL/glu.h:189
GLU_MAP1_TRIM_2 = 100210 # /usr/include/GL/glu.h:192
GLU_MAP1_TRIM_3 = 100211 # /usr/include/GL/glu.h:193
GLU_POINT = 100010 # /usr/include/GL/glu.h:196
GLU_LINE = 100011 # /usr/include/GL/glu.h:197
GLU_FILL = 100012 # /usr/include/GL/glu.h:198
GLU_SILHOUETTE = 100013 # /usr/include/GL/glu.h:199
GLU_SMOOTH = 100000 # /usr/include/GL/glu.h:205
GLU_FLAT = 100001 # /usr/include/GL/glu.h:206
GLU_NONE = 100002 # /usr/include/GL/glu.h:207
GLU_OUTSIDE = 100020 # /usr/include/GL/glu.h:210
GLU_INSIDE = 100021 # /usr/include/GL/glu.h:211
GLU_TESS_BEGIN = 100100 # /usr/include/GL/glu.h:214
GLU_BEGIN = 100100 # /usr/include/GL/glu.h:215
GLU_TESS_VERTEX = 100101 # /usr/include/GL/glu.h:216
GLU_VERTEX = 100101 # /usr/include/GL/glu.h:217
GLU_TESS_END = 100102 # /usr/include/GL/glu.h:218
GLU_END = 100102 # /usr/include/GL/glu.h:219
GLU_TESS_ERROR = 100103 # /usr/include/GL/glu.h:220
GLU_TESS_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:221
GLU_EDGE_FLAG = 100104 # /usr/include/GL/glu.h:222
GLU_TESS_COMBINE = 100105 # /usr/include/GL/glu.h:223
GLU_TESS_BEGIN_DATA = 100106 # /usr/include/GL/glu.h:224
GLU_TESS_VERTEX_DATA = 100107 # /usr/include/GL/glu.h:225
GLU_TESS_END_DATA = 100108 # /usr/include/GL/glu.h:226
GLU_TESS_ERROR_DATA = 100109 # /usr/include/GL/glu.h:227
GLU_TESS_EDGE_FLAG_DATA = 100110 # /usr/include/GL/glu.h:228
GLU_TESS_COMBINE_DATA = 100111 # /usr/include/GL/glu.h:229
GLU_CW = 100120 # /usr/include/GL/glu.h:232
GLU_CCW = 100121 # /usr/include/GL/glu.h:233
GLU_INTERIOR = 100122 # /usr/include/GL/glu.h:234
GLU_EXTERIOR = 100123 # /usr/include/GL/glu.h:235
GLU_UNKNOWN = 100124 # /usr/include/GL/glu.h:236
GLU_TESS_WINDING_RULE = 100140 # /usr/include/GL/glu.h:239
GLU_TESS_BOUNDARY_ONLY = 100141 # /usr/include/GL/glu.h:240
GLU_TESS_TOLERANCE = 100142 # /usr/include/GL/glu.h:241
GLU_TESS_ERROR1 = 100151 # /usr/include/GL/glu.h:244
GLU_TESS_ERROR2 = 100152 # /usr/include/GL/glu.h:245
GLU_TESS_ERROR3 = 100153 # /usr/include/GL/glu.h:246
GLU_TESS_ERROR4 = 100154 # /usr/include/GL/glu.h:247
GLU_TESS_ERROR5 = 100155 # /usr/include/GL/glu.h:248
GLU_TESS_ERROR6 = 100156 # /usr/include/GL/glu.h:249
GLU_TESS_ERROR7 = 100157 # /usr/include/GL/glu.h:250
GLU_TESS_ERROR8 = 100158 # /usr/include/GL/glu.h:251
GLU_TESS_MISSING_BEGIN_POLYGON = 100151 # /usr/include/GL/glu.h:252
GLU_TESS_MISSING_BEGIN_CONTOUR = 100152 # /usr/include/GL/glu.h:253
GLU_TESS_MISSING_END_POLYGON = 100153 # /usr/include/GL/glu.h:254
GLU_TESS_MISSING_END_CONTOUR = 100154 # /usr/include/GL/glu.h:255
GLU_TESS_COORD_TOO_LARGE = 100155 # /usr/include/GL/glu.h:256
GLU_TESS_NEED_COMBINE_CALLBACK = 100156 # /usr/include/GL/glu.h:257
GLU_TESS_WINDING_ODD = 100130 # /usr/include/GL/glu.h:260
GLU_TESS_WINDING_NONZERO = 100131 # /usr/include/GL/glu.h:261
GLU_TESS_WINDING_POSITIVE = 100132 # /usr/include/GL/glu.h:262
GLU_TESS_WINDING_NEGATIVE = 100133 # /usr/include/GL/glu.h:263
GLU_TESS_WINDING_ABS_GEQ_TWO = 100134 # /usr/include/GL/glu.h:264
class struct_GLUnurbs(Structure):
__slots__ = [
]
struct_GLUnurbs._fields_ = [
('_opaque_struct', c_int)
]
GLUnurbs = struct_GLUnurbs # /usr/include/GL/glu.h:274
class struct_GLUquadric(Structure):
__slots__ = [
]
struct_GLUquadric._fields_ = [
('_opaque_struct', c_int)
]
GLUquadric = struct_GLUquadric # /usr/include/GL/glu.h:275
class struct_GLUtesselator(Structure):
__slots__ = [
]
struct_GLUtesselator._fields_ = [
('_opaque_struct', c_int)
]
GLUtesselator = struct_GLUtesselator # /usr/include/GL/glu.h:276
GLUnurbsObj = GLUnurbs # /usr/include/GL/glu.h:279
GLUquadricObj = GLUquadric # /usr/include/GL/glu.h:280
GLUtesselatorObj = GLUtesselator # /usr/include/GL/glu.h:281
GLUtriangulatorObj = GLUtesselator # /usr/include/GL/glu.h:282
GLU_TESS_MAX_COORD = 9.9999999999999998e+149 # /usr/include/GL/glu.h:284
_GLUfuncptr = CFUNCTYPE(None) # /usr/include/GL/glu.h:287
# /usr/include/GL/glu.h:289
gluBeginCurve = _link_function('gluBeginCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:290
gluBeginPolygon = _link_function('gluBeginPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:291
gluBeginSurface = _link_function('gluBeginSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:292
gluBeginTrim = _link_function('gluBeginTrim', None, [POINTER(GLUnurbs)], None)
GLint = c_int # /usr/include/GL/gl.h:159
GLenum = c_uint # /usr/include/GL/gl.h:153
GLsizei = c_int # /usr/include/GL/gl.h:163
# /usr/include/GL/glu.h:293
gluBuild1DMipmapLevels = _link_function('gluBuild1DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:294
gluBuild1DMipmaps = _link_function('gluBuild1DMipmaps', GLint, [GLenum, GLint, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:295
gluBuild2DMipmapLevels = _link_function('gluBuild2DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:296
gluBuild2DMipmaps = _link_function('gluBuild2DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
# /usr/include/GL/glu.h:297
gluBuild3DMipmapLevels = _link_function('gluBuild3DMipmapLevels', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, GLint, GLint, GLint, POINTER(None)], None)
# /usr/include/GL/glu.h:298
gluBuild3DMipmaps = _link_function('gluBuild3DMipmaps', GLint, [GLenum, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(None)], None)
GLboolean = c_ubyte # /usr/include/GL/gl.h:154
GLubyte = c_ubyte # /usr/include/GL/gl.h:160
# /usr/include/GL/glu.h:299
gluCheckExtension = _link_function('gluCheckExtension', GLboolean, [POINTER(GLubyte), POINTER(GLubyte)], None)
GLdouble = c_double # /usr/include/GL/gl.h:166
# /usr/include/GL/glu.h:300
gluCylinder = _link_function('gluCylinder', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:301
gluDeleteNurbsRenderer = _link_function('gluDeleteNurbsRenderer', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:302
gluDeleteQuadric = _link_function('gluDeleteQuadric', None, [POINTER(GLUquadric)], None)
# /usr/include/GL/glu.h:303
gluDeleteTess = _link_function('gluDeleteTess', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:304
gluDisk = _link_function('gluDisk', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:305
gluEndCurve = _link_function('gluEndCurve', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:306
gluEndPolygon = _link_function('gluEndPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:307
gluEndSurface = _link_function('gluEndSurface', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:308
gluEndTrim = _link_function('gluEndTrim', None, [POINTER(GLUnurbs)], None)
# /usr/include/GL/glu.h:309
gluErrorString = _link_function('gluErrorString', POINTER(GLubyte), [GLenum], None)
GLfloat = c_float # /usr/include/GL/gl.h:164
# /usr/include/GL/glu.h:310
gluGetNurbsProperty = _link_function('gluGetNurbsProperty', None, [POINTER(GLUnurbs), GLenum, POINTER(GLfloat)], None)
# /usr/include/GL/glu.h:311
gluGetString = _link_function('gluGetString', POINTER(GLubyte), [GLenum], None)
# /usr/include/GL/glu.h:312
gluGetTessProperty = _link_function('gluGetTessProperty', None, [POINTER(GLUtesselator), GLenum, POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:313
gluLoadSamplingMatrices = _link_function('gluLoadSamplingMatrices', None, [POINTER(GLUnurbs), POINTER(GLfloat), POINTER(GLfloat), POINTER(GLint)], None)
# /usr/include/GL/glu.h:314
gluLookAt = _link_function('gluLookAt', None, [GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:315
gluNewNurbsRenderer = _link_function('gluNewNurbsRenderer', POINTER(GLUnurbs), [], None)
# /usr/include/GL/glu.h:316
gluNewQuadric = _link_function('gluNewQuadric', POINTER(GLUquadric), [], None)
# /usr/include/GL/glu.h:317
gluNewTess = _link_function('gluNewTess', POINTER(GLUtesselator), [], None)
# /usr/include/GL/glu.h:318
gluNextContour = _link_function('gluNextContour', None, [POINTER(GLUtesselator), GLenum], None)
# /usr/include/GL/glu.h:319
gluNurbsCallback = _link_function('gluNurbsCallback', None, [POINTER(GLUnurbs), GLenum, _GLUfuncptr], None)
GLvoid = None # /usr/include/GL/gl.h:156
# /usr/include/GL/glu.h:320
gluNurbsCallbackData = _link_function('gluNurbsCallbackData', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:321
gluNurbsCallbackDataEXT = _link_function('gluNurbsCallbackDataEXT', None, [POINTER(GLUnurbs), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:322
gluNurbsCurve = _link_function('gluNurbsCurve', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLenum], None)
# /usr/include/GL/glu.h:323
gluNurbsProperty = _link_function('gluNurbsProperty', None, [POINTER(GLUnurbs), GLenum, GLfloat], None)
# /usr/include/GL/glu.h:324
gluNurbsSurface = _link_function('gluNurbsSurface', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, POINTER(GLfloat), GLint, GLint, POINTER(GLfloat), GLint, GLint, GLenum], None)
# /usr/include/GL/glu.h:325
gluOrtho2D = _link_function('gluOrtho2D', None, [GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:326
gluPartialDisk = _link_function('gluPartialDisk', None, [POINTER(GLUquadric), GLdouble, GLdouble, GLint, GLint, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:327
gluPerspective = _link_function('gluPerspective', None, [GLdouble, GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:328
gluPickMatrix = _link_function('gluPickMatrix', None, [GLdouble, GLdouble, GLdouble, GLdouble, POINTER(GLint)], None)
# /usr/include/GL/glu.h:329
gluProject = _link_function('gluProject', GLint, [GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:330
gluPwlCurve = _link_function('gluPwlCurve', None, [POINTER(GLUnurbs), GLint, POINTER(GLfloat), GLint, GLenum], None)
# /usr/include/GL/glu.h:331
gluQuadricCallback = _link_function('gluQuadricCallback', None, [POINTER(GLUquadric), GLenum, _GLUfuncptr], None)
# /usr/include/GL/glu.h:332
gluQuadricDrawStyle = _link_function('gluQuadricDrawStyle', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:333
gluQuadricNormals = _link_function('gluQuadricNormals', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:334
gluQuadricOrientation = _link_function('gluQuadricOrientation', None, [POINTER(GLUquadric), GLenum], None)
# /usr/include/GL/glu.h:335
gluQuadricTexture = _link_function('gluQuadricTexture', None, [POINTER(GLUquadric), GLboolean], None)
# /usr/include/GL/glu.h:336
gluScaleImage = _link_function('gluScaleImage', GLint, [GLenum, GLsizei, GLsizei, GLenum, POINTER(None), GLsizei, GLsizei, GLenum, POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:337
gluSphere = _link_function('gluSphere', None, [POINTER(GLUquadric), GLdouble, GLint, GLint], None)
# /usr/include/GL/glu.h:338
gluTessBeginContour = _link_function('gluTessBeginContour', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:339
gluTessBeginPolygon = _link_function('gluTessBeginPolygon', None, [POINTER(GLUtesselator), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:340
gluTessCallback = _link_function('gluTessCallback', None, [POINTER(GLUtesselator), GLenum, _GLUfuncptr], None)
# /usr/include/GL/glu.h:341
gluTessEndContour = _link_function('gluTessEndContour', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:342
gluTessEndPolygon = _link_function('gluTessEndPolygon', None, [POINTER(GLUtesselator)], None)
# /usr/include/GL/glu.h:343
gluTessNormal = _link_function('gluTessNormal', None, [POINTER(GLUtesselator), GLdouble, GLdouble, GLdouble], None)
# /usr/include/GL/glu.h:344
gluTessProperty = _link_function('gluTessProperty', None, [POINTER(GLUtesselator), GLenum, GLdouble], None)
# /usr/include/GL/glu.h:345
gluTessVertex = _link_function('gluTessVertex', None, [POINTER(GLUtesselator), POINTER(GLdouble), POINTER(GLvoid)], None)
# /usr/include/GL/glu.h:346
gluUnProject = _link_function('gluUnProject', GLint, [GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
# /usr/include/GL/glu.h:347
gluUnProject4 = _link_function('gluUnProject4', GLint, [GLdouble, GLdouble, GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLint), GLdouble, GLdouble, POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble), POINTER(GLdouble)], None)
__all__ = ['GLU_EXT_object_space_tess', 'GLU_EXT_nurbs_tessellator',
'GLU_FALSE', 'GLU_TRUE', 'GLU_VERSION_1_1', 'GLU_VERSION_1_2',
'GLU_VERSION_1_3', 'GLU_VERSION', 'GLU_EXTENSIONS', 'GLU_INVALID_ENUM',
'GLU_INVALID_VALUE', 'GLU_OUT_OF_MEMORY', 'GLU_INCOMPATIBLE_GL_VERSION',
'GLU_INVALID_OPERATION', 'GLU_OUTLINE_POLYGON', 'GLU_OUTLINE_PATCH',
'GLU_NURBS_ERROR', 'GLU_ERROR', 'GLU_NURBS_BEGIN', 'GLU_NURBS_BEGIN_EXT',
'GLU_NURBS_VERTEX', 'GLU_NURBS_VERTEX_EXT', 'GLU_NURBS_NORMAL',
'GLU_NURBS_NORMAL_EXT', 'GLU_NURBS_COLOR', 'GLU_NURBS_COLOR_EXT',
'GLU_NURBS_TEXTURE_COORD', 'GLU_NURBS_TEX_COORD_EXT', 'GLU_NURBS_END',
'GLU_NURBS_END_EXT', 'GLU_NURBS_BEGIN_DATA', 'GLU_NURBS_BEGIN_DATA_EXT',
'GLU_NURBS_VERTEX_DATA', 'GLU_NURBS_VERTEX_DATA_EXT', 'GLU_NURBS_NORMAL_DATA',
'GLU_NURBS_NORMAL_DATA_EXT', 'GLU_NURBS_COLOR_DATA',
'GLU_NURBS_COLOR_DATA_EXT', 'GLU_NURBS_TEXTURE_COORD_DATA',
'GLU_NURBS_TEX_COORD_DATA_EXT', 'GLU_NURBS_END_DATA',
'GLU_NURBS_END_DATA_EXT', 'GLU_NURBS_ERROR1', 'GLU_NURBS_ERROR2',
'GLU_NURBS_ERROR3', 'GLU_NURBS_ERROR4', 'GLU_NURBS_ERROR5',
'GLU_NURBS_ERROR6', 'GLU_NURBS_ERROR7', 'GLU_NURBS_ERROR8',
'GLU_NURBS_ERROR9', 'GLU_NURBS_ERROR10', 'GLU_NURBS_ERROR11',
'GLU_NURBS_ERROR12', 'GLU_NURBS_ERROR13', 'GLU_NURBS_ERROR14',
'GLU_NURBS_ERROR15', 'GLU_NURBS_ERROR16', 'GLU_NURBS_ERROR17',
'GLU_NURBS_ERROR18', 'GLU_NURBS_ERROR19', 'GLU_NURBS_ERROR20',
'GLU_NURBS_ERROR21', 'GLU_NURBS_ERROR22', 'GLU_NURBS_ERROR23',
'GLU_NURBS_ERROR24', 'GLU_NURBS_ERROR25', 'GLU_NURBS_ERROR26',
'GLU_NURBS_ERROR27', 'GLU_NURBS_ERROR28', 'GLU_NURBS_ERROR29',
'GLU_NURBS_ERROR30', 'GLU_NURBS_ERROR31', 'GLU_NURBS_ERROR32',
'GLU_NURBS_ERROR33', 'GLU_NURBS_ERROR34', 'GLU_NURBS_ERROR35',
'GLU_NURBS_ERROR36', 'GLU_NURBS_ERROR37', 'GLU_AUTO_LOAD_MATRIX',
'GLU_CULLING', 'GLU_SAMPLING_TOLERANCE', 'GLU_DISPLAY_MODE',
'GLU_PARAMETRIC_TOLERANCE', 'GLU_SAMPLING_METHOD', 'GLU_U_STEP', 'GLU_V_STEP',
'GLU_NURBS_MODE', 'GLU_NURBS_MODE_EXT', 'GLU_NURBS_TESSELLATOR',
'GLU_NURBS_TESSELLATOR_EXT', 'GLU_NURBS_RENDERER', 'GLU_NURBS_RENDERER_EXT',
'GLU_OBJECT_PARAMETRIC_ERROR', 'GLU_OBJECT_PARAMETRIC_ERROR_EXT',
'GLU_OBJECT_PATH_LENGTH', 'GLU_OBJECT_PATH_LENGTH_EXT', 'GLU_PATH_LENGTH',
'GLU_PARAMETRIC_ERROR', 'GLU_DOMAIN_DISTANCE', 'GLU_MAP1_TRIM_2',
'GLU_MAP1_TRIM_3', 'GLU_POINT', 'GLU_LINE', 'GLU_FILL', 'GLU_SILHOUETTE',
'GLU_SMOOTH', 'GLU_FLAT', 'GLU_NONE', 'GLU_OUTSIDE', 'GLU_INSIDE',
'GLU_TESS_BEGIN', 'GLU_BEGIN', 'GLU_TESS_VERTEX', 'GLU_VERTEX',
'GLU_TESS_END', 'GLU_END', 'GLU_TESS_ERROR', 'GLU_TESS_EDGE_FLAG',
'GLU_EDGE_FLAG', 'GLU_TESS_COMBINE', 'GLU_TESS_BEGIN_DATA',
'GLU_TESS_VERTEX_DATA', 'GLU_TESS_END_DATA', 'GLU_TESS_ERROR_DATA',
'GLU_TESS_EDGE_FLAG_DATA', 'GLU_TESS_COMBINE_DATA', 'GLU_CW', 'GLU_CCW',
'GLU_INTERIOR', 'GLU_EXTERIOR', 'GLU_UNKNOWN', 'GLU_TESS_WINDING_RULE',
'GLU_TESS_BOUNDARY_ONLY', 'GLU_TESS_TOLERANCE', 'GLU_TESS_ERROR1',
'GLU_TESS_ERROR2', 'GLU_TESS_ERROR3', 'GLU_TESS_ERROR4', 'GLU_TESS_ERROR5',
'GLU_TESS_ERROR6', 'GLU_TESS_ERROR7', 'GLU_TESS_ERROR8',
'GLU_TESS_MISSING_BEGIN_POLYGON', 'GLU_TESS_MISSING_BEGIN_CONTOUR',
'GLU_TESS_MISSING_END_POLYGON', 'GLU_TESS_MISSING_END_CONTOUR',
'GLU_TESS_COORD_TOO_LARGE', 'GLU_TESS_NEED_COMBINE_CALLBACK',
'GLU_TESS_WINDING_ODD', 'GLU_TESS_WINDING_NONZERO',
'GLU_TESS_WINDING_POSITIVE', 'GLU_TESS_WINDING_NEGATIVE',
'GLU_TESS_WINDING_ABS_GEQ_TWO', 'GLUnurbs', 'GLUquadric', 'GLUtesselator',
'GLUnurbsObj', 'GLUquadricObj', 'GLUtesselatorObj', 'GLUtriangulatorObj',
'GLU_TESS_MAX_COORD', '_GLUfuncptr', 'gluBeginCurve', 'gluBeginPolygon',
'gluBeginSurface', 'gluBeginTrim', 'gluBuild1DMipmapLevels',
'gluBuild1DMipmaps', 'gluBuild2DMipmapLevels', 'gluBuild2DMipmaps',
'gluBuild3DMipmapLevels', 'gluBuild3DMipmaps', 'gluCheckExtension',
'gluCylinder', 'gluDeleteNurbsRenderer', 'gluDeleteQuadric', 'gluDeleteTess',
'gluDisk', 'gluEndCurve', 'gluEndPolygon', 'gluEndSurface', 'gluEndTrim',
'gluErrorString', 'gluGetNurbsProperty', 'gluGetString', 'gluGetTessProperty',
'gluLoadSamplingMatrices', 'gluLookAt', 'gluNewNurbsRenderer',
'gluNewQuadric', 'gluNewTess', 'gluNextContour', 'gluNurbsCallback',
'gluNurbsCallbackData', 'gluNurbsCallbackDataEXT', 'gluNurbsCurve',
'gluNurbsProperty', 'gluNurbsSurface', 'gluOrtho2D', 'gluPartialDisk',
'gluPerspective', 'gluPickMatrix', 'gluProject', 'gluPwlCurve',
'gluQuadricCallback', 'gluQuadricDrawStyle', 'gluQuadricNormals',
'gluQuadricOrientation', 'gluQuadricTexture', 'gluScaleImage', 'gluSphere',
'gluTessBeginContour', 'gluTessBeginPolygon', 'gluTessCallback',
'gluTessEndContour', 'gluTessEndPolygon', 'gluTessNormal', 'gluTessProperty',
'gluTessVertex', 'gluUnProject', 'gluUnProject4']
# END GENERATED CONTENT (do not edit above this line)
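# A hedged usage sketch (added for illustration; assumes a system GLU library
# is installed and an active OpenGL context, which pyglet creates with a
# window). Only names defined in this module are used:
#
#     import pyglet
#     from pyglet.gl.glu import gluNewQuadric, gluSphere, gluDeleteQuadric
#     window = pyglet.window.Window()   # creating a window creates a GL context
#     quad = gluNewQuadric()
#     gluSphere(quad, 1.0, 32, 32)      # radius, slices, stacks
#     gluDeleteQuadric(quad)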
| bsd-3-clause |
analyseuc3m/ANALYSE-v1 | lms/djangoapps/course_api/blocks/transformers/tests/test_block_counts.py | 12 | 2030 | """
Tests for BlockCountsTransformer.
"""
# pylint: disable=protected-access
from openedx.core.lib.block_structure.factory import BlockStructureFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory
from ..block_counts import BlockCountsTransformer
class TestBlockCountsTransformer(ModuleStoreTestCase):
"""
Test behavior of BlockCountsTransformer
"""
def setUp(self):
super(TestBlockCountsTransformer, self).setUp()
self.course_key = SampleCourseFactory.create().id
self.course_usage_key = self.store.make_course_usage_key(self.course_key)
self.block_structure = BlockStructureFactory.create_from_modulestore(self.course_usage_key, self.store)
def test_transform(self):
# collect phase
BlockCountsTransformer.collect(self.block_structure)
self.block_structure._collect_requested_xblock_fields()
# transform phase
BlockCountsTransformer(['problem', 'chapter']).transform(usage_info=None, block_structure=self.block_structure)
# block_counts
chapter_x_key = self.course_key.make_usage_key('chapter', 'chapter_x')
block_counts_for_chapter_x = self.block_structure.get_transformer_block_data(
chapter_x_key, BlockCountsTransformer,
)
block_counts_for_course = self.block_structure.get_transformer_block_data(
self.course_usage_key, BlockCountsTransformer,
)
# verify count of chapters
self.assertEqual(block_counts_for_course['chapter'], 2)
# verify count of problems
self.assertEqual(block_counts_for_course['problem'], 6)
self.assertEqual(block_counts_for_chapter_x['problem'], 3)
# verify other block types are not counted
for block_type in ['course', 'html', 'video']:
self.assertNotIn(block_type, block_counts_for_course)
self.assertNotIn(block_type, block_counts_for_chapter_x)
| agpl-3.0 |
pombreda/libforensics | code/lf/win/shell/link/dtypes.py | 13 | 5891 | # Copyright 2010 Michael Murr
#
# This file is part of LibForensics.
#
# LibForensics is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibForensics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LibForensics. If not, see <http://www.gnu.org/licenses/>.
"""Data structures to work with shell link files"""
# local imports
from lf.dtypes import LERecord, BitTypeU32, bit, raw
from lf.win.dtypes import (
FILETIME_LE, COLORREF, DWORD, WORD, BYTE, CLSID_LE, GUID_LE,
LCID_LE
)
from lf.win.con.dtypes import COORD_LE
__docformat__ = "restructuredtext en"
__all__ = [
"HotKey", "ShellLinkHeader", "LinkInfoHeader", "VolumeIDHeader",
"CNRLHeader", "DataBlockHeader", "ConsoleDataBlock", "ConsoleFEDataBlock",
"DarwinDataBlock", "EnvironmentVariableDataBlock",
"IconEnvironmentDataBlock", "KnownFolderDataBlock",
"SpecialFolderDataBlock", "TrackerDataBlock", "TrackerDataBlockFooter",
"DomainRelativeObjId"
]
class LinkFlagsBits(BitTypeU32):
has_idlist = bit
has_link_info = bit
has_name = bit
has_relative_path = bit
has_working_dir = bit
has_args = bit
has_icon_location = bit
is_unicode = bit
force_no_link_info = bit
has_exp_string = bit
run_in_separate_proc = bit
has_logo3_id = bit
has_darwin_id = bit
run_as_user = bit
has_exp_icon = bit
no_pidl_alias = bit
force_unc_name = bit
run_with_shim_layer = bit
force_no_link_track = bit
enable_target_metadata = bit
disable_link_path_tracking = bit
disable_known_folder_rel_tracking = bit
no_kf_alias = bit
allow_link_to_link = bit
unalias_on_save = bit
prefer_environment_path = bit
keep_local_idlist_for_unc_target = bit
# end class LinkFlagsBits
class LinkFlags(LERecord):
field = LinkFlagsBits
# end class LinkFlags
class FileAttributesBits(BitTypeU32):
read_only = bit
hidden = bit
system = bit
reserved1 = bit
directory = bit
archive = bit
reserved2 = bit
normal = bit
temp = bit
sparse = bit
reparse_point = bit
compressed = bit
offline = bit
not_content_indexed = bit
encrypted = bit
# end class FileAttributesBits
class FileAttributes(LERecord):
field = FileAttributesBits
# end class FileAttributes
class HotKey(LERecord):
vkcode = BYTE
vkmod = BYTE
# end class HotKey
class ShellLinkHeader(LERecord):
size = DWORD
clsid = CLSID_LE
flags = LinkFlags
attrs = FileAttributes
btime = FILETIME_LE
atime = FILETIME_LE
mtime = FILETIME_LE
target_size = DWORD
icon_index = DWORD
show_cmd = DWORD
hotkey = HotKey
reserved1 = raw(2)
reserved2 = raw(4)
reserved3 = raw(4)
# end class ShellLinkHeader
class LinkInfoFlags(BitTypeU32):
has_vol_id_and_local_base_path = bit
has_cnrl_and_path_suffix = bit
# end class LinkInfoFlags
class LinkInfoHeader(LERecord):
size = DWORD
header_size = DWORD
flags = LinkInfoFlags
vol_id_offset = DWORD
local_base_path_offset = DWORD
cnrl_offset = DWORD
path_suffix_offset = DWORD
# end class LinkInfoHeader
class VolumeIDHeader(LERecord):
size = DWORD
type = DWORD
serial_num = DWORD
vol_label_offset = DWORD
# end class VolumeIDHeader
class CNRLFlags(BitTypeU32):
valid_device = bit
valid_net_type = bit
# end class CNRLFlags
class CNRLHeader(LERecord):
size = DWORD
flags = CNRLFlags
net_name_offset = DWORD
device_name_offset = DWORD
net_type = DWORD
# end class CNRLHeader
class DataBlockHeader(LERecord):
size = DWORD
sig = DWORD
# end class DataBlockHeader
class ConsoleDataBlock(DataBlockHeader):
fill_attributes = WORD
popup_fill_attributes = WORD
screen_buffer_size = COORD_LE
window_size = COORD_LE
window_origin = COORD_LE
font = DWORD
input_buf_size = DWORD
font_size = DWORD
font_family = DWORD
font_weight = DWORD
face_name = raw(64)
cursor_size = DWORD
full_screen = DWORD
quick_edit = DWORD
insert_mode = DWORD
auto_position = DWORD
history_buf_size = DWORD
history_buf_count = DWORD
history_no_dup = DWORD
color_table = [COLORREF] * 16
# end class ConsoleDataBlock
class ConsoleFEDataBlock(DataBlockHeader):
code_page = LCID_LE
# end class ConsoleFEDataBlock
class DarwinDataBlock(DataBlockHeader):
darwin_data_ansi = raw(260)
darwin_data_uni = raw(520)
# end class DarwinDataBlock
class ExpandableStringsDataBlock(DataBlockHeader):
target_ansi = raw(260)
target_uni = raw(520)
# end class ExpandableStringsDataBlock
class EnvironmentVariableDataBlock(ExpandableStringsDataBlock):
pass
# end class EnvironmentVariableDataBlock
class IconEnvironmentDataBlock(ExpandableStringsDataBlock):
pass
# end class IconEnvironmentDataBlock
class KnownFolderDataBlock(DataBlockHeader):
kf_id = GUID_LE
offset = DWORD
# end class KnownFolderDataBlock
class SpecialFolderDataBlock(DataBlockHeader):
sf_id = DWORD
offset = DWORD
# end class SpecialFolderDataBlock
class DomainRelativeObjId(LERecord):
volume = GUID_LE
object = GUID_LE
# end class DomainRelativeObjId
class TrackerDataBlock(DataBlockHeader):
length = DWORD
version = DWORD
# end class TrackerDataBlock
class TrackerDataBlockFooter(LERecord):
droid = DomainRelativeObjId
droid_birth = DomainRelativeObjId
# end class TrackerDataBlockFooter
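# A hedged, library-independent sanity check (added for illustration, not part
# of LibForensics): the ShellLinkHeader record above mirrors the fixed .lnk
# file header, whose documented size field is 0x4C and whose CLSID is
# 00021401-0000-0000-c000-000000000046. Assuming `data` holds the raw bytes of
# a .lnk file:
#
#     import struct, uuid
#     def looks_like_lnk(data):
#         size, = struct.unpack_from("<I", data, 0)
#         clsid = uuid.UUID(bytes_le=bytes(data[4:20]))
#         return (size == 0x4C and
#                 str(clsid) == "00021401-0000-0000-c000-000000000046")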
| gpl-3.0 |
lishensan/xbmc | lib/gtest/test/gtest_output_test.py | 184 | 12027 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO([email protected]): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
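# Hedged illustration (not in the original script) of the substitution above:
#   RemoveLocations('src/foo_test.cc:42: Failure')  -> 'foo_test.cc:#: Failure'
#   RemoveLocations('src\\foo_test.cc(42): error')  -> 'foo_test.cc:#: error'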
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
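# Hedged illustration (not in the original script):
#   RemoveTestCounts('3 tests from 2 test cases') -> '? tests from ? test cases'
#   RemoveTestCounts('4 FAILED TESTS')            -> '? FAILED TESTS'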
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ, capture_stderr=False)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
# A mis-configured source control system can cause \r to appear in EOL
# sequences when we read the golden file, irrespective of the operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual)
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
| gpl-2.0 |
atul-bhouraskar/django | tests/model_inheritance_regress/models.py | 243 | 5863 | from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Meta:
ordering = ('name',)
def __str__(self):
return "%s the place" % self.name
@python_2_unicode_compatible
class Restaurant(Place):
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
def __str__(self):
return "%s the restaurant" % self.name
@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField(default=False)
def __str__(self):
return "%s the italian restaurant" % self.name
@python_2_unicode_compatible
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, models.CASCADE, primary_key=True, parent_link=True)
capacity = models.IntegerField()
def __str__(self):
return "%s the parking lot" % self.name
class ParkingLot2(Place):
# In lieu of any other connector, an existing OneToOneField will be
# promoted to the primary key.
parent = models.OneToOneField(Place, models.CASCADE)
class ParkingLot3(Place):
# The parent_link connector need not be the pk on the model.
primary_key = models.AutoField(primary_key=True)
parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
class ParkingLot4(models.Model):
# Test parent_link connector can be discovered in abstract classes.
parent = models.OneToOneField(Place, models.CASCADE, parent_link=True)
class Meta:
abstract = True
class ParkingLot4A(ParkingLot4, Place):
pass
class ParkingLot4B(Place, ParkingLot4):
pass
@python_2_unicode_compatible
class Supplier(models.Model):
name = models.CharField(max_length=50)
restaurant = models.ForeignKey(Restaurant, models.CASCADE)
def __str__(self):
return self.name
class Wholesaler(Supplier):
retailer = models.ForeignKey(Supplier, models.CASCADE, related_name='wholesale_supplier')
class Parent(models.Model):
created = models.DateTimeField(default=datetime.datetime.now)
class Child(Parent):
name = models.CharField(max_length=10)
class SelfRefParent(models.Model):
parent_data = models.IntegerField()
self_data = models.ForeignKey('self', models.SET_NULL, null=True)
class SelfRefChild(SelfRefParent):
child_data = models.IntegerField()
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
class Meta:
ordering = ('-pub_date', 'headline')
def __str__(self):
return self.headline
class ArticleWithAuthor(Article):
author = models.CharField(max_length=100)
class M2MBase(models.Model):
articles = models.ManyToManyField(Article)
class M2MChild(M2MBase):
name = models.CharField(max_length=50)
class Evaluation(Article):
quality = models.IntegerField()
class Meta:
abstract = True
class QualityControl(Evaluation):
assignee = models.CharField(max_length=50)
@python_2_unicode_compatible
class BaseM(models.Model):
base_name = models.CharField(max_length=100)
def __str__(self):
return self.base_name
@python_2_unicode_compatible
class DerivedM(BaseM):
customPK = models.IntegerField(primary_key=True)
derived_name = models.CharField(max_length=100)
def __str__(self):
return "PK = %d, base_name = %s, derived_name = %s" % (
self.customPK, self.base_name, self.derived_name)
class AuditBase(models.Model):
planned_date = models.DateField()
class Meta:
abstract = True
verbose_name_plural = 'Audits'
class CertificationAudit(AuditBase):
class Meta(AuditBase.Meta):
abstract = True
class InternalCertificationAudit(CertificationAudit):
auditing_dept = models.CharField(max_length=20)
# Check that abstract classes don't get m2m tables autocreated.
@python_2_unicode_compatible
class Person(models.Model):
name = models.CharField(max_length=100)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class AbstractEvent(models.Model):
name = models.CharField(max_length=100)
attendees = models.ManyToManyField(Person, related_name="%(class)s_set")
class Meta:
abstract = True
ordering = ('name',)
def __str__(self):
return self.name
class BirthdayParty(AbstractEvent):
pass
class BachelorParty(AbstractEvent):
pass
class MessyBachelorParty(BachelorParty):
pass
# Check concrete -> abstract -> concrete inheritance
class SearchableLocation(models.Model):
keywords = models.CharField(max_length=256)
class Station(SearchableLocation):
name = models.CharField(max_length=128)
class Meta:
abstract = True
class BusStation(Station):
bus_routes = models.CommaSeparatedIntegerField(max_length=128)
inbound = models.BooleanField(default=False)
class TrainStation(Station):
zone = models.IntegerField()
class User(models.Model):
username = models.CharField(max_length=30, unique=True)
class Profile(User):
profile_id = models.AutoField(primary_key=True)
extra = models.CharField(max_length=30, blank=True)
# Check concrete + concrete -> concrete -> concrete
class Politician(models.Model):
politician_id = models.AutoField(primary_key=True)
title = models.CharField(max_length=50)
class Congressman(Person, Politician):
state = models.CharField(max_length=2)
class Senator(Congressman):
pass
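# A hedged usage sketch (illustration only, not part of the test fixtures):
# with multi-table inheritance, creating a child row also creates its parent
# rows, and a parent instance can be traversed back down through the implicit
# one-to-one accessors (lowercased child class names):
#
#     r = ItalianRestaurant.objects.create(
#         name="Ristorante Miron", address="1234 W. Ash St.",
#         serves_gnocchi=True)
#     place = Place.objects.get(name="Ristorante Miron")
#     assert place.restaurant.italianrestaurant.serves_gnocchi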
| bsd-3-clause |
Adnn/django | tests/responses/tests.py | 226 | 4171 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.http import HttpResponse
from django.http.response import HttpResponseBase
from django.test import SimpleTestCase
UTF8 = 'utf-8'
ISO88591 = 'iso-8859-1'
class HttpResponseBaseTests(SimpleTestCase):
def test_closed(self):
r = HttpResponseBase()
self.assertIs(r.closed, False)
r.close()
self.assertIs(r.closed, True)
def test_write(self):
r = HttpResponseBase()
self.assertIs(r.writable(), False)
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.write('asdf')
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance is not writable'):
r.writelines(['asdf\n', 'qwer\n'])
def test_tell(self):
r = HttpResponseBase()
with self.assertRaisesMessage(IOError, 'This HttpResponseBase instance cannot tell its position'):
r.tell()
def test_setdefault(self):
"""
HttpResponseBase.setdefault() should not change an existing header
and should be case insensitive.
"""
r = HttpResponseBase()
r['Header'] = 'Value'
r.setdefault('header', 'changed')
self.assertEqual(r['header'], 'Value')
r.setdefault('x-header', 'DefaultValue')
self.assertEqual(r['X-Header'], 'DefaultValue')
class HttpResponseTests(SimpleTestCase):
def test_status_code(self):
resp = HttpResponse(status=503)
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_change_status_code(self):
resp = HttpResponse()
resp.status_code = 503
self.assertEqual(resp.status_code, 503)
self.assertEqual(resp.reason_phrase, "Service Unavailable")
def test_reason_phrase(self):
reason = "I'm an anarchist coffee pot on crack."
resp = HttpResponse(status=814, reason=reason)
self.assertEqual(resp.status_code, 814)
self.assertEqual(resp.reason_phrase, reason)
def test_charset_detection(self):
""" HttpResponse should parse charset from content_type."""
response = HttpResponse('ok')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
self.assertEqual(response['Content-Type'], 'text/html; charset=%s' % ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % UTF8, charset=ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=%s' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset="%s"' % ISO88591)
self.assertEqual(response.charset, ISO88591)
response = HttpResponse(content_type='text/plain; charset=')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
response = HttpResponse(content_type='text/plain')
self.assertEqual(response.charset, settings.DEFAULT_CHARSET)
def test_response_content_charset(self):
"""HttpResponse should encode based on charset."""
content = "Café :)"
utf8_content = content.encode(UTF8)
iso_content = content.encode(ISO88591)
response = HttpResponse(utf8_content)
self.assertContains(response, utf8_content)
response = HttpResponse(iso_content, content_type='text/plain; charset=%s' % ISO88591)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content)
self.assertContains(response, iso_content)
response = HttpResponse(iso_content, content_type='text/plain')
self.assertContains(response, iso_content)
def test_repr(self):
response = HttpResponse(content="Café :)".encode(UTF8), status=201)
expected = '<HttpResponse status_code=201, "text/html; charset=utf-8">'
self.assertEqual(repr(response), expected)
| bsd-3-clause |
0Chencc/CTFCrackTools | Lib/Lib/distutils/util.py | 6 | 22225 | """distutils.util
Miscellaneous utility functions -- anything that doesn't fit into
one of the other *util.py modules.
"""
__revision__ = "$Id: util.py 83588 2010-08-02 21:35:06Z ezio.melotti $"
import sys, os, string, re
from distutils.errors import DistutilsPlatformError
from distutils.dep_util import newer
from distutils.spawn import spawn
from distutils import log
from distutils.errors import DistutilsByteCompileError
if sys.platform.startswith('java'):
import _imp
def get_platform ():
"""Return a string that identifies the current platform. This is used
mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; e.g., for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = string.find(sys.version, prefix)
if i == -1:
return sys.platform
j = string.find(sys.version, ")", i)
look = sys.version[i+len(prefix):j].lower()
if look=='amd64':
return 'win-amd64'
if look=='itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
(osname, host, release, version, machine) = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = string.lower(osname)
osname = string.replace(osname, '/', '')
machine = string.replace(machine, ' ', '_')
machine = string.replace(machine, '/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile (r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
from distutils.sysconfig import get_config_vars
cfgvars = get_config_vars()
macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
if not macver:
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if 1:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
m = re.search(
r'<key>ProductUserVisibleVersion</key>\s*' +
r'<string>(.*?)</string>', f.read())
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
from distutils.sysconfig import get_config_vars
release = macver
osname = "macosx"
if (macrelease + '.') >= '10.4.' and \
'-arch' in get_config_vars().get('CFLAGS', '').strip():
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r"%(archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxint >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
machine = 'ppc'
# See 'i386' case
if sys.maxint >= 2**32:
machine = 'ppc64'
return "%s-%s-%s" % (osname, release, machine)
# get_platform ()
def convert_path (pathname):
"""Return 'pathname' as a name that will work on the native filesystem,
i.e. split it on '/' and put it back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError, "path '%s' cannot be absolute" % pathname
if pathname[-1] == '/':
raise ValueError, "path '%s' cannot end with '/'" % pathname
paths = string.split(pathname, '/')
while '.' in paths:
paths.remove('.')
if not paths:
return os.curdir
return os.path.join(*paths)
# convert_path ()
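# Hedged illustration (not in the original module): on a native Windows
# Python, where os.sep == '\\',
#     convert_path('foo/bar/baz')  ->  'foo\\bar\\baz'
# while on POSIX systems the pathname is returned unchanged.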
def change_root (new_root, pathname):
"""Return 'pathname' with 'new_root' prepended. If 'pathname' is
relative, this is equivalent to "os.path.join(new_root,pathname)".
Otherwise, it requires making 'pathname' relative and then joining the
two, which is tricky on DOS/Windows and Mac OS.
"""
os_name = os._name if sys.platform.startswith('java') else os.name
if os_name == 'posix':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
return os.path.join(new_root, pathname[1:])
elif os_name == 'nt':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == '\\':
path = path[1:]
return os.path.join(new_root, path)
elif os_name == 'os2':
(drive, path) = os.path.splitdrive(pathname)
if path[0] == os.sep:
path = path[1:]
return os.path.join(new_root, path)
elif os_name == 'mac':
if not os.path.isabs(pathname):
return os.path.join(new_root, pathname)
else:
# Chop off volume name from start of path
elements = string.split(pathname, ":", 1)
pathname = ":" + elements[1]
return os.path.join(new_root, pathname)
else:
raise DistutilsPlatformError, \
"nothing known about platform '%s'" % os_name
_environ_checked = 0
def check_environ ():
"""Ensure that 'os.environ' has all the environment variables we
guarantee that users can use in config files, command-line options,
etc. Currently this includes:
HOME - user's home directory (Unix only)
PLAT - description of the current platform, including hardware
and OS (see 'get_platform()')
"""
global _environ_checked
if _environ_checked:
return
if os.name == 'posix' and 'HOME' not in os.environ:
import pwd
os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
if 'PLAT' not in os.environ:
os.environ['PLAT'] = get_platform()
_environ_checked = 1
def subst_vars (s, local_vars):
"""Perform shell/Perl-style variable substitution on 'string'. Every
occurrence of '$' followed by a name is considered a variable, and
variable is substituted by the value found in the 'local_vars'
dictionary, or in 'os.environ' if it's not in 'local_vars'.
'os.environ' is first checked/augmented to guarantee that it contains
certain values: see 'check_environ()'. Raise ValueError for any
variables not found in either 'local_vars' or 'os.environ'.
"""
check_environ()
def _subst (match, local_vars=local_vars):
var_name = match.group(1)
if var_name in local_vars:
return str(local_vars[var_name])
else:
return os.environ[var_name]
try:
return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
except KeyError, var:
raise ValueError, "invalid variable '$%s'" % var
# subst_vars ()
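# A hedged usage sketch (illustrative values only):
#   subst_vars('$base/lib', {'base': '/usr'})  ->  '/usr/lib'
#   subst_vars('build/$PLAT', {})  -> expands $PLAT from os.environ, which
#   check_environ() guarantees is populated; unknown names raise ValueError.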
def grok_environment_error (exc, prefix="error: "):
"""Generate a useful error message from an EnvironmentError (IOError or
OSError) exception object. Handles Python 1.5.1 and 1.5.2 styles, and
does what it can to deal with exception objects that don't have a
filename (which happens when the error is due to a two-file operation,
such as 'rename()' or 'link()'. Returns the error message as a string
prefixed with 'prefix'.
"""
# check for Python 1.5.2-style {IO,OS}Error exception objects
if hasattr(exc, 'filename') and hasattr(exc, 'strerror'):
if exc.filename:
error = prefix + "%s: %s" % (exc.filename, exc.strerror)
else:
# two-argument functions in posix module don't
# include the filename in the exception object!
error = prefix + "%s" % exc.strerror
else:
error = prefix + str(exc[-1])
return error
# Needed by 'split_quoted()'
_wordchars_re = _squote_re = _dquote_re = None
def _init_regex():
global _wordchars_re, _squote_re, _dquote_re
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
def split_quoted (s):
"""Split a string up according to Unix shell-like rules for quotes and
backslashes. In short: words are delimited by spaces, as long as those
spaces are not escaped by a backslash, or inside a quoted string.
Single and double quotes are equivalent, and the quote characters can
be backslash-escaped. The backslash is stripped from any two-character
escape sequence, leaving only the escaped character. The quote
characters are stripped from any quoted string. Returns a list of
words.
"""
# This is a nice algorithm for splitting up a single string, since it
# doesn't require character-by-character examination. It was a little
# bit of a brain-bender to get it working right, though...
if _wordchars_re is None: _init_regex()
s = string.strip(s)
words = []
pos = 0
while s:
m = _wordchars_re.match(s, pos)
end = m.end()
if end == len(s):
words.append(s[:end])
break
if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
words.append(s[:end]) # we definitely have a word delimiter
s = string.lstrip(s[end:])
pos = 0
elif s[end] == '\\': # preserve whatever is being escaped;
# will become part of the current word
s = s[:end] + s[end+1:]
pos = end+1
else:
if s[end] == "'": # slurp singly-quoted string
m = _squote_re.match(s, end)
elif s[end] == '"': # slurp doubly-quoted string
m = _dquote_re.match(s, end)
else:
raise RuntimeError, \
"this can't happen (bad char '%c')" % s[end]
if m is None:
raise ValueError, \
"bad string (mismatched %s quotes?)" % s[end]
(beg, end) = m.span()
s = s[:beg] + s[beg+1:end-1] + s[end:]
pos = m.end() - 2
if pos >= len(s):
words.append(s)
break
return words
# split_quoted ()
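# A hedged usage sketch (illustrative input only):
#   split_quoted('one "two three" four\\ five')
#   ->  ['one', 'two three', 'four five']
# Quotes are stripped from quoted words, and the backslash is stripped from
# the escaped space, which then stays inside the word.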
def execute (func, args, msg=None, verbose=0, dry_run=0):
"""Perform some action that affects the outside world (eg. by
writing to the filesystem). Such actions are special because they
are disabled by the 'dry_run' flag. This method takes care of all
that bureaucracy for you; all you have to do is supply the
function to call and an argument tuple for it (to embody the
"external action" being performed), and an optional message to
print.
"""
if msg is None:
msg = "%s%r" % (func.__name__, args)
if msg[-2:] == ',)': # correct for singleton tuple
msg = msg[0:-2] + ')'
log.info(msg)
if not dry_run:
func(*args)
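# A hedged usage sketch ('build/temp.txt' is an illustrative filename and
# 'log' is the module's distutils logger):
#   execute(os.remove, ('build/temp.txt',), dry_run=1)
# logs "remove('build/temp.txt')" but skips the call because dry_run is set.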
def strtobool (val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = string.lower(val)
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError, "invalid truth value %r" % (val,)
def byte_compile (py_files,
optimize=0, force=0,
prefix=None, base_dir=None,
verbose=1, dry_run=0,
direct=None):
"""Byte-compile a collection of Python source files to either .pyc
or .pyo files in the same directory. 'py_files' is a list of files
to compile; any files that don't end in ".py" are silently skipped.
'optimize' must be one of the following:
0 - don't optimize (generate .pyc)
1 - normal optimization (like "python -O")
2 - extra optimization (like "python -OO")
If 'force' is true, all files are recompiled regardless of
timestamps.
The source filename encoded in each bytecode file defaults to the
filenames listed in 'py_files'; you can modify these with 'prefix' and
'base_dir'. 'prefix' is a string that will be stripped off of each
source filename, and 'base_dir' is a directory name that will be
prepended (after 'prefix' is stripped). You can supply either or both
(or neither) of 'prefix' and 'base_dir', as you wish.
If 'dry_run' is true, doesn't actually do anything that would
affect the filesystem.
Byte-compilation is either done directly in this interpreter process
with the standard py_compile module, or indirectly by writing a
temporary script and executing it. Normally, you should let
'byte_compile()' figure out whether to use direct compilation or not (see
the source for details). The 'direct' flag is used by the script
generated in indirect mode; unless you know what you're doing, leave
it set to None.
"""
# nothing is done if sys.dont_write_bytecode is True
if sys.dont_write_bytecode:
raise DistutilsByteCompileError('byte-compiling is disabled.')
# First, if the caller didn't force us into direct or indirect mode,
# figure out which mode we should be in. We take a conservative
# approach: choose direct mode *only* if the current interpreter is
# in debug mode and optimize is 0. If we're not in debug mode (-O
# or -OO), we don't know which level of optimization this
# interpreter is running with, so we can't do direct
# byte-compilation and be certain that it's the right thing. Thus,
# always compile indirectly if the current interpreter is in either
# optimize mode, or if either optimization level was requested by
# the caller.
if direct is None:
direct = (__debug__ and optimize == 0)
# "Indirect" byte-compilation: write a temporary script and then
# run it with the appropriate flags.
if not direct:
try:
from tempfile import mkstemp
(script_fd, script_name) = mkstemp(".py")
except ImportError:
from tempfile import mktemp
(script_fd, script_name) = None, mktemp(".py")
log.info("writing byte-compilation script '%s'", script_name)
if not dry_run:
if script_fd is not None:
script = os.fdopen(script_fd, "w")
else:
script = open(script_name, "w")
script.write("""\
from distutils.util import byte_compile
files = [
""")
# XXX would be nice to write absolute filenames, just for
# safety's sake (script should be more robust in the face of
# chdir'ing before running it). But this requires abspath'ing
# 'prefix' as well, and that breaks the hack in build_lib's
# 'byte_compile()' method that carefully tacks on a trailing
# slash (os.sep really) to make sure the prefix here is "just
# right". This whole prefix business is rather delicate -- the
# problem is that it's really a directory, but I'm treating it
# as a dumb string, so trailing slashes and so forth matter.
#py_files = map(os.path.abspath, py_files)
#if prefix:
# prefix = os.path.abspath(prefix)
script.write(string.join(map(repr, py_files), ",\n") + "]\n")
script.write("""
byte_compile(files, optimize=%r, force=%r,
prefix=%r, base_dir=%r,
verbose=%r, dry_run=0,
direct=1)
""" % (optimize, force, prefix, base_dir, verbose))
script.close()
cmd = [sys.executable, script_name]
if optimize == 1:
cmd.insert(1, "-O")
elif optimize == 2:
cmd.insert(1, "-OO")
spawn(cmd, dry_run=dry_run)
execute(os.remove, (script_name,), "removing %s" % script_name,
dry_run=dry_run)
# "Direct" byte-compilation: use the py_compile module to compile
# right here, right now. Note that the script generated in indirect
# mode simply calls 'byte_compile()' in direct mode, a weird sort of
# cross-process recursion. Hey, it works!
else:
import py_compile
for file in py_files:
if file[-3:] != ".py":
# This lets us be lazy and not filter filenames in
# the "install_lib" command.
continue
# Terminology from the py_compile module:
# cfile - byte-compiled file
# dfile - purported source filename (same as 'file' by default)
if sys.platform.startswith('java'):
cfile = _imp.makeCompiledFilename(file)
else:
cfile = file + (__debug__ and "c" or "o")
dfile = file
if prefix:
if file[:len(prefix)] != prefix:
raise ValueError, \
("invalid prefix: filename %r doesn't start with %r"
% (file, prefix))
dfile = dfile[len(prefix):]
if base_dir:
dfile = os.path.join(base_dir, dfile)
cfile_base = os.path.basename(cfile)
if direct:
if force or newer(file, cfile):
log.info("byte-compiling %s to %s", file, cfile_base)
if not dry_run:
py_compile.compile(file, cfile, dfile)
else:
log.debug("skipping byte-compilation of %s to %s",
file, cfile_base)
# byte_compile ()
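# A hedged usage sketch ('pkg/mod.py' and '/usr/lib/pkg' are illustrative):
#   byte_compile(['pkg/mod.py'], prefix='pkg/', base_dir='/usr/lib/pkg')
# strips 'pkg/' and prepends the base dir, so the purported source filename
# recorded in the bytecode becomes '/usr/lib/pkg/mod.py'.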
def rfc822_escape (header):
"""Return a version of the string escaped for inclusion in an
RFC-822 header, by ensuring there are 8 spaces space after each newline.
"""
lines = string.split(header, '\n')
header = string.join(lines, '\n' + 8*' ')
return header
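# A hedged usage sketch:
#   rfc822_escape('line one\nline two')
#   ->  'line one\n        line two'   (8-space continuation indent)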
| gpl-3.0 |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/pages/migrations/0014_auto_20170817_1705.py | 2 | 2045 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-17 17:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0019_delete_filter'),
('wagtailcore', '0039_collectionviewrestriction'),
('wagtailsearchpromotions', '0002_capitalizeverbose'),
('wagtailredirects', '0005_capitalizeverbose'),
('wagtailforms', '0003_capitalizeverbose'),
('pages', '0013_videopage_videopagecarouselitem'),
]
operations = [
migrations.CreateModel(
name='VideoGalleryPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.RenameModel(
old_name='VideoPageCarouselItem',
new_name='VideoGalleryPageCarouselItem',
),
migrations.RemoveField(
model_name='videopage',
name='feed_image',
),
migrations.RemoveField(
model_name='videopage',
name='page_ptr',
),
migrations.AlterField(
model_name='videogallerypagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='pages.VideoGalleryPage'),
),
migrations.DeleteModel(
name='VideoPage',
),
]
| mit |
alanlhutchison/empirical-JTK_CYCLE-with-asymmetry | previous_files/jtk7.py | 1 | 19118 | #!/usr/bin/env python
"""
Created on April 20 2014
@author: Alan L. Hutchison, [email protected], Aaron R. Dinner Group, University of Chicago
This script is one in a series of scripts for running empirical JTK_CYCLE analysis as described in
Hutchison, Maienschein-Cline, and Chiang et al. Improved statistical methods enable greater sensitivity in rhythm detection for genome-wide data, PLoS Computational Biology 2015 11(3): e1004094. doi:10.1371/journal.pcbi.1004094
Please use ./jtk7.py -h to see the help screen for further instructions on running this script.
"""
VERSION="1.1"
from scipy.stats import kendalltau
from operator import itemgetter
import numpy as np
import sys
import argparse
import itertools as it
import time
#import matplotlib.pyplot as plt
#import matplotlib.cm as cm
from scipy.stats import norm
import os.path
def main(args):
fn = args.filename
prefix = args.prefix
fn_waveform = args.waveform
fn_period = args.period
fn_phase = args.phase
fn_width = args.width
fn_out = args.output
if fn_out == "DEFAULT":
if ".txt" in fn:
fn_out=fn.replace(".txt","_"+prefix+"_jtkout.txt")
else:
fn_out = fn+"_" +prefix + "_jtkout.txt"
print fn
add_on = 1
while os.path.isfile(fn_out):
print fn_out, "already exists, take evasive action!!!"
endstr = '.'+fn_out.split('.')[-1]
mid = '_'+str(add_on)+endstr
if add_on ==1:
fn_out = fn_out.replace(endstr,mid)
else:
midendstr = '_'+fn_out.split('_')[-1]
fn_out = fn_out.replace(midendstr,mid)
add_on = add_on + 1
waveforms = read_in_list(fn_waveform)
periods = read_in_list(fn_period)
phases = read_in_list(fn_phase)
widths = read_in_list(fn_width)
#fn_out = "\t".join(fn.replace("jtkprepared","").split(".")[0:-1])+"_jtkout_emprical.txt"
header,data = read_in(fn)
header,series = organize_data(header,data)
RealKen = KendallTauP()
#output = ["ID\tWaveform\tPeriod\tPhase\tAsymmetry\tMean\tStd_Dev\tMax\tMin\tMax_Amp\tFC\tIQR_FC\tTau\tempP"]
Ps = []
with open(fn_out,'w') as g:
g.write("ID\tWaveform\tPeriod\tPhase\tAsymmetry\tMean\tStd_Dev\tMax\tMaxLoc\tMin\tMinLoc\tMax_Amp\tFC\tIQR_FC\tTau\tP\n")
for serie in series:
if [s for s in serie[1:] if s!="NA"]==[]:
name = [serie[0]]+["All_NA"]+[-10000]*10+[np.nan,np.nan]
else:
mmax,mmaxloc,mmin,mminloc,MAX_AMP=series_char(serie,header)
sIQR_FC=IQR_FC(serie)
smean = series_mean(serie)
sstd = series_std(serie)
sFC = FC(serie)
local_ps = []
for waveform in waveforms:
for period in periods:
for phase in phases:
for width in widths:
reference = generate_base_reference(header,waveform,period,phase,width)
geneID,tau,p = generate_mod_series(reference,serie,RealKen)
out_line = [geneID,waveform,period,phase,width,smean,sstd,mmax,mmaxloc,mmin,mminloc,MAX_AMP,sFC,sIQR_FC,tau,p]
out_line = [str(l) for l in out_line]
g.write("\t".join(out_line)+"\n")
#local_ps = sorted(local_ps)
#best = min(local_ps)
#Ps.append(best)
#append_out(fn_out,best)
#name = [geneID,waveform,period,phase,width,smean,sstd,mmax,mmin,MAX_AMP,sFC,sIQR_FC,tau,empirical_p]
#name = [str(n) for n in name]
#print "\t".join(name)
#print time.asctime( time.localtime(time.time()) )
#output.append("\t".join(name))
#write_out(fn_out,Ps)
def append_out(fn_out,line):
line = [str(l) for l in line]
with open(fn_out,'a') as g:
g.write("\t".join(line)+"\n")
def write_out(fn_out,output):
with open(fn_out,'w') as g:
for line in output:
g.write(str(line)+"\n")
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def read_in_list(fn):
with open(fn,'r') as f:
lines = f.read().splitlines()
return lines
def read_in(fn):
"""Read in data to header and data"""
with open(fn,'r') as f:
data=[]
start_right=0
for line in f:
words = line.strip().split()
words = [word.strip() for word in words]
if words[0] == "#":
start_right = 1
header = words[1:]
else:
if start_right == 0:
print "Please enter file with header starting with #"
elif start_right == 1:
data.append(words)
return header, data
def organize_data(header,data):
"""
Organize the list of lists such that genes with similar time-series holes match (for the null distribution calc)
Return a header ['#','ZTX','ZTY'...] and a list of lists [ lists with similar holes (identical null distribution) , [],[],[]]
"""
L = data
for i in xrange(1,len(header)):
L=sorted(L, key=itemgetter(i))
return header,L
def generate_base_reference(header,waveform="cosine",period=24,phase=0,width=12):
"""
This will generate a waveform with a given phase and period based on the header,
"""
tpoints = []
ZTs = header
coef = 2.0 * np.pi / float(period)
w = float(width) * coef
for ZT in ZTs:
z = ZT[2:].split("_")[0]
tpoints.append( (float(z)-float(phase) ) * coef)
def trough(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = 1 + -x/w
elif x > w:
y = (x-w)/(2*np.pi - w)
return y
def cosine(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = np.cos(x/(w/np.pi))
elif x > w:
y = np.cos( (x+2.*(np.pi-w))*np.pi/ (2*np.pi - w) )
return y
if waveform == "cosine":
reference=[cosine(tpoint,w) for tpoint in tpoints]
elif waveform == "trough":
reference=[trough(tpoint,w) for tpoint in tpoints]
return reference
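# A hedged sketch of the reference waveform (illustrative header values):
# for header ['ZT0', 'ZT6', 'ZT12', 'ZT18'] with period=24, phase=0 and
# width=12, the symmetric "cosine" branch yields approximately
# [1.0, 0.0, -1.0, 0.0], i.e. cos evaluated at 0, pi/2, pi and 3*pi/2.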
def IQR_FC(series):
qlo = __score_at_percentile__(series, 25)
qhi = __score_at_percentile__(series, 75)
if (qlo=="NA" or qhi=="NA"):
return "NA"
elif (qhi==0):
return 0
elif ( qlo==0):
return "NA"
else:
iqr = qhi/qlo
return iqr
def FC(series):
series=[float(s) if s!="NA" else 0 for s in series[1:] if s!="NA" ]
if series!=[]:
mmax = max(series)
mmin = min(series)
if mmin==0:
sFC = -10000
else:
sFC = mmax / mmin
else:
sFC = "NA"
return sFC
def series_char(fullseries,header):
"""Uses interquartile range to estimate amplitude of a time series."""
series=[float(s) for s in fullseries[1:] if s!="NA"]
head = [header[i] for i,s in enumerate(fullseries[1:]) if s!="NA"]
if series!=[]:
mmax = max(series)
#print series.index(mmax)
mmaxloc = head[series.index(mmax)]
mmin = min(series)
#print series.index(mmin)
mminloc = head[series.index(mmin)]
diff=mmax-mmin
else:
mmax = "NA"
mmaxloc = "NA"
mmin = "NA"
mminloc = "NA"
diff = "NA"
return mmax,mmaxloc,mmin,mminloc,diff
def series_mean(series):
"""Finds the mean of a timeseries"""
series = [float(s) for s in series[1:] if s!="NA"]
return np.mean(series)
def series_std(series):
"""Finds the std dev of a timeseries"""
series = [float(s) for s in series[1:] if s!="NA"]
return np.std(series)
def __score_at_percentile__(ser, per):
ser = [float(se) for se in ser[1:] if se!="NA"]
if len(ser)<5:
score ="NA"
return score
else:
ser = np.sort(ser)
i = (per/100. * len(ser))
if (i % 1 == 0):
score = ser[int(i)]
else:
interpolate = lambda a,b,frac: a + (b - a)*frac
score = interpolate(ser[int(i)], ser[int(i) + 1], i % 1)
return float(score)
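# A hedged sketch: with 8 usable points, per=25 gives i = 2.0, so the score
# is the 3rd-smallest value; a non-integral i linearly interpolates between
# the neighbouring sorted values, and fewer than 5 points returns "NA".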
def generate_mod_series(reference,series,RealKen):
"""
Takes the series from generate_base_null, takes the list from data, and makes a null
for each gene in data or uses the one previously calculated.
Then it runs Kendall's Tau on the exp. series against the null
"""
geneID = series[0]
values = series[1:]
binary = np.array([1.0 if value!="NA" else np.nan for value in values])
reference = np.array(reference)
temp = reference*binary
mod_reference = [value for value in temp if not np.isnan(value)]
mod_values = [float(value) for value in values if value!='NA']
if len(mod_values) < 3:
tau,p = np.nan,np.nan
elif mod_values.count(np.nan) == len(mod_values):
tau,p = np.nan,np.nan
elif mod_values.count(0) == len(mod_values):
tau,p = np.nan,np.nan
else:
tau,p=kendalltau(mod_values,mod_reference)
if not np.isnan(tau):
if len(mod_values) < 150:
pk = RealKen.pval(tau,len(mod_values))
if pk is not None:
p=pk
else:
p = p / 2.0
if tau < 0:
p = 1-p
return geneID,tau,p
def __create_parser__():
p = argparse.ArgumentParser(
description="Python script for running empirical JTK_CYCLE with asymmetry search as described in Hutchison, Maienschein-Cline, and Chiang et al. Improved statistical methods enable greater sensitivity in rhythm detection for genome-wide data, PLoS Computational Biology 2015 11(3): e1004094. This script was written by Alan L. Hutchison, [email protected], Aaron R. Dinner Group, University of Chicago.",
epilog="Please contact the correpsonding author if you have any questions.",
version=VERSION
)
#p.add_argument("-t", "--test",
# action='store_true',
# default=False,
# help="run the Python unittest testing suite")
p.add_argument("-o", "--output",
dest="output",
action='store',
metavar="filename string",
type=str,
default = "DEFAULT",
help="You want to output something. If you leave this blank, _jtkout.txt will be appended to your filename")
analysis = p.add_argument_group(title="JTK_CYCLE analysis options")
analysis.add_argument("-f", "--filename",
dest="filename",
action='store',
metavar="filename string",
type=str,
help='This is the filename of the data series you wish to analyze.\
The data should be tab-spaced. The first row should contain a # sign followed by the time points with either CT or ZT preceding the time point (such as ZT0 or ZT4). Longer or shorter prefixes will not work. The following rows should contain the gene/series ID followed by the values for every time point. Where values are not available NA should be put in its place.')
analysis.add_argument("-x","--prefix",
dest="prefix",
type=str,
metavar="string",
action='store',
default="",
help="string to be inserted in the output filename for this run")
analysis.add_argument("--waveform",
dest="waveform",
type=str,
metavar="filename string",
action='store',
default="cosine",
#choices=["waveform_cosine.txt","waveform_rampup.txt","waveform_rampdown.txt","waveform_step.txt","waveform_impulse.txt","waveform_trough.txt"],
help='Should be a file with waveforms you wish to search for listed in a single column separated by newlines.\
Options include cosine (dflt), trough')
analysis.add_argument("-w", "--width", "-a", "--asymmetry",
dest="width",
type=str,
metavar="filename string",
action='store',
default="widths_02-22.txt",
#choices=["widths_02-22.txt","widths_04-20_by4.txt","widths_04-12-20.txt","widths_08-16.txt","width_12.txt"]
help='Should be a file with asymmetries (widths) you wish to search for listed in a single column separated by newlines.\
Provided files include "widths_02-22.txt","widths_04-20_by4.txt","widths_04-12-20.txt","widths_08-16.txt","width_12.txt"\nasymmetries=widths')
analysis.add_argument("-ph", "--phase",
dest="phase",
metavar="filename string",
type=str,
default="phases_00-22_by2.txt",
help='Should be a file with phases you wish to search for listed in a single column separated by newlines.\
Example files include "phases_00-22_by2.txt" or "phases_00-22_by4.txt" or "phases_00-20_by4.txt"')
analysis.add_argument("-p","--period",
dest="period",
metavar="filename string",
type=str,
action='store',
default="period_24.txt",
help='Should be a file with periods you wish to search for listed in a single column separated by newlines.\
Provided file is "period_24.txt"')
distribution = analysis.add_mutually_exclusive_group(required=False)
distribution.add_argument("-e", "--exact",
dest="harding",
action='store_true',
default=False,
help="use Harding's exact null distribution (dflt)")
distribution.add_argument("-n", "--normal",
dest="normal",
action='store_true',
default=False,
help="use normal approximation to null distribution")
return p
# instantiate class to precalculate distribution
# usage:
# K = KendallTauP()
# pval = K.pval(tau,n,two_tailed=True)
class KendallTauP:
def __init__(self,N=150):
# largest number of samples to precompute
self.N = N
Nint = self.N*(self.N-1)/2
# first allocate freq slots for largest sample array
# as we fill this in we'll save the results for smaller samples
# total possible number of inversions is Nint + 1
freqN = np.zeros(Nint + 1)
freqN[0] = 1.0
# save results at each step in freqs array
self.freqs = [np.array([1.0])]
for i in xrange(1,self.N):
last = np.copy(freqN)
for j in xrange(Nint+1):
# update each entry by summing over i entries to the left
freqN[j] += sum(last[max(0,j-i):j])
# copy current state into freqs array
# the kth entry of freqs should have 1+k*(k-1)/2 entries
self.freqs.append(np.copy(freqN[0:(1+(i+1)*i/2)]))
# turn freqs into cdfs
# distributions still with respect to number of inversions
self.cdfs = []
for i in xrange(self.N):
self.cdfs.append(np.copy(self.freqs[i]))
# turn into cumulative frequencies
for j in xrange(1,len(self.freqs[i])):
self.cdfs[i][j] += self.cdfs[i][j-1]
# convert freqs to probs
self.cdfs[i] = self.cdfs[i]/sum(self.freqs[i])
# plot exact distribution compared to normal approx
def plot(self,nlist):
colors = cm.Set1(np.linspace(0,1,len(nlist)))
# for plotting gaussian
x = np.linspace(-1.2,1.2,300)
# plot pdfs
plt.figure()
for i in xrange(len(nlist)):
ntot = len(self.freqs[nlist[i]-1])-1
tauvals = (ntot - 2.0*np.arange(len(self.freqs[nlist[i]-1])))/ntot
probs = ((ntot+1.0)/2.0)*self.freqs[nlist[i]-1]/sum(self.freqs[nlist[i]-1])
plt.scatter(tauvals,probs,color=colors[i])
# now plot gaussian comparison
var = 2.0*(2.0*nlist[i]+5.0)/(nlist[i]*(nlist[i]-1)*9.0)
plt.plot(x,norm.pdf(x,0.0,np.sqrt(var)),color=colors[i])
plt.legend(nlist,loc='best')
# plt.savefig('pdfs.png')
plt.show()
# now plot cdfs
plt.figure()
for i in xrange(len(nlist)):
ntot = len(self.freqs[nlist[i]-1])-1
tauvals = -1.0*(ntot - 2.0*np.arange(len(self.freqs[nlist[i]-1])))/ntot
probs = self.cdfs[nlist[i]-1]
plt.scatter(tauvals,probs,color=colors[i])
# now plot gaussian comparison
var = 2.0*(2.0*nlist[i]+5.0)/(nlist[i]*(nlist[i]-1)*9.0)
plt.plot(x,norm.cdf(x,0.0,np.sqrt(var)),color=colors[i])
plt.legend(nlist,loc='best')
# plt.savefig('cdfs.png')
plt.show()
# use cdfs to return pval
# default to return two tailed pval
def pval(self,tau,n,two_tailed=False):
# enforce tau is between -1 and 1
if tau <= -1.000001 or tau >= 1.000001:
sys.stderr.write(str(type(tau))+"\n")
sys.stderr.write(str(tau)+"\n")
sys.stderr.write("invalid tau\n")
#print 'invalid tau'
return None
# enforce n is less than our precomputed quantities
if n > self.N:
#print 'n is too large'
sys.stderr.write("n is too large/n")
return None
# convert tau to value in terms of number of inversions
ntot = n*(n-1)/2
inv_score = int(round((ntot - tau * ntot)/2.0))
# I'm a little worried about the precision of this,
# but probably not enough to be really worried for reasonable n
# since we really only need precision to resolve ntot points
# if two tailed, we're getting a tail from a symmetric dist
min_inv_score = min(inv_score,ntot-inv_score)
if two_tailed:
pval = self.cdfs[n-1][min_inv_score]*2.0
else:
# if one tailed return prob of getting that or fewer inversions
pval = self.cdfs[n-1][inv_score]
# if inv_score is 0, might have larger than 0.5 prob
return min(pval,1.0)
if __name__=="__main__":
parser = __create_parser__()
args = parser.parse_args()
main(args)
| mit |
blooparksystems/odoo | addons/mass_mailing/controllers/main.py | 18 | 1211 | # -*- coding: utf-8 -*-
import werkzeug
from openerp import http, SUPERUSER_ID
from openerp.http import request
class MassMailController(http.Controller):
@http.route('/mail/track/<int:mail_id>/blank.gif', type='http', auth='none')
def track_mail_open(self, mail_id, **post):
""" Email tracking. """
mail_mail_stats = request.registry.get('mail.mail.statistics')
mail_mail_stats.set_opened(request.cr, SUPERUSER_ID, mail_mail_ids=[mail_id])
response = werkzeug.wrappers.Response()
response.mimetype = 'image/gif'
response.data = 'R0lGODlhAQABAIAAANvf7wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw=='.decode('base64')
return response
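# A hedged sketch of the expected exchange (the mail id 42 is illustrative;
# the URL pattern comes from the route decorator above):
#   GET /mail/track/42/blank.gif  ->  200, image/gif (1x1 transparent pixel)
# and the matching mail.mail.statistics record is marked as opened.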
@http.route('/r/<string:code>/m/<int:stat_id>', type='http', auth="none")
def full_url_redirect(self, code, stat_id, **post):
cr, uid, context = request.cr, request.uid, request.context
request.registry['link.tracker.click'].add_click(cr, uid, code, request.httprequest.remote_addr, request.session['geoip'].get('country_code'), stat_id=stat_id, context=context)
return werkzeug.utils.redirect(request.registry['link.tracker'].get_url_from_code(cr, uid, code, context=context), 301)
| gpl-3.0 |
benpatterson/edx-platform | common/djangoapps/track/migrations/0001_initial.py | 189 | 2527 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TrackingLog'
db.create_table('track_trackinglog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('dtcreated', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('username', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('ip', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('event_source', self.gf('django.db.models.fields.CharField')(max_length=32)),
('event_type', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('event', self.gf('django.db.models.fields.TextField')(blank=True)),
('agent', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
('page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('time', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal('track', ['TrackingLog'])
def backwards(self, orm):
# Deleting model 'TrackingLog'
db.delete_table('track_trackinglog')
models = {
'track.trackinglog': {
'Meta': {'object_name': 'TrackingLog'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'page': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['track']
| agpl-3.0 |
happyleavesaoc/home-assistant | tests/components/camera/test_init.py | 15 | 3572 | """The tests for the camera component."""
import asyncio
from unittest.mock import patch
import pytest
from homeassistant.setup import setup_component
from homeassistant.const import ATTR_ENTITY_PICTURE
import homeassistant.components.camera as camera
import homeassistant.components.http as http
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.async import run_coroutine_threadsafe
from tests.common import (
get_test_home_assistant, get_test_instance_port, assert_setup_component)
class TestSetupCamera(object):
"""Test class for setup camera."""
def setup_method(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Setup demo platfrom on camera component."""
config = {
camera.DOMAIN: {
'platform': 'demo'
}
}
with assert_setup_component(1, camera.DOMAIN):
setup_component(self.hass, camera.DOMAIN, config)
class TestGetImage(object):
"""Test class for camera."""
def setup_method(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
setup_component(
self.hass, http.DOMAIN,
{http.DOMAIN: {http.CONF_SERVER_PORT: get_test_instance_port()}})
config = {
camera.DOMAIN: {
'platform': 'demo'
}
}
setup_component(self.hass, camera.DOMAIN, config)
state = self.hass.states.get('camera.demo_camera')
self.url = "{0}{1}".format(
self.hass.config.api.base_url,
state.attributes.get(ATTR_ENTITY_PICTURE))
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
@patch('homeassistant.components.camera.demo.DemoCamera.camera_image',
autospec=True, return_value=b'Test')
def test_get_image_from_camera(self, mock_camera):
"""Grab a image from camera entity."""
self.hass.start()
image = run_coroutine_threadsafe(camera.async_get_image(
self.hass, 'camera.demo_camera'), self.hass.loop).result()
assert mock_camera.called
assert image == b'Test'
def test_get_image_without_exists_camera(self):
"""Try to get image without exists camera."""
self.hass.states.remove('camera.demo_camera')
with pytest.raises(HomeAssistantError):
run_coroutine_threadsafe(camera.async_get_image(
self.hass, 'camera.demo_camera'), self.hass.loop).result()
def test_get_image_with_timeout(self, aioclient_mock):
"""Try to get image with timeout."""
aioclient_mock.get(self.url, exc=asyncio.TimeoutError())
with pytest.raises(HomeAssistantError):
run_coroutine_threadsafe(camera.async_get_image(
self.hass, 'camera.demo_camera'), self.hass.loop).result()
assert len(aioclient_mock.mock_calls) == 1
def test_get_image_with_bad_http_state(self, aioclient_mock):
"""Try to get image with bad http status."""
aioclient_mock.get(self.url, status=400)
with pytest.raises(HomeAssistantError):
run_coroutine_threadsafe(camera.async_get_image(
self.hass, 'camera.demo_camera'), self.hass.loop).result()
assert len(aioclient_mock.mock_calls) == 1
| apache-2.0 |
quinot/ansible | lib/ansible/modules/network/cloudengine/ce_switchport.py | 20 | 27667 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_switchport
version_added: "2.4"
short_description: Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches.
description:
- Manages Layer 2 switchport interfaces on HUAWEI CloudEngine switches.
author: QijunPan (@CloudEngine-Ansible)
notes:
- When C(state=absent), VLANs can be added/removed from trunk links and
the existing access VLAN can be 'unconfigured' to just having VLAN 1
on that interface.
- When working with trunks VLANs the keywords add/remove are always sent
in the C(port trunk allow-pass vlan) command. Use verbose mode to see
commands sent.
- When C(state=unconfigured), the interface will result with having a default
Layer 2 interface, i.e. vlan 1 in access mode.
options:
interface:
description:
- Full name of the interface, i.e. 40GE1/0/22.
required: true
default: null
mode:
description:
- The link type of an interface.
required: false
default: null
choices: ['access','trunk']
access_vlan:
description:
- If C(mode=access), used as the access VLAN ID, in the range from 1 to 4094.
required: false
default: null
native_vlan:
description:
- If C(mode=trunk), used as the trunk native VLAN ID, in the range from 1 to 4094.
required: false
default: null
trunk_vlans:
description:
- If C(mode=trunk), used as the VLAN range to ADD or REMOVE
from the trunk, such as 2-10 or 2,5,10-15, etc.
required: false
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present', 'absent', 'unconfigured']
'''
EXAMPLES = '''
- name: switchport module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Ensure 10GE1/0/22 is in its default switchport state
ce_switchport:
interface: 10GE1/0/22
state: unconfigured
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 is configured for access vlan 20
ce_switchport:
interface: 10GE1/0/22
mode: access
access_vlan: 20
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 only has vlans 5-10 as trunk vlans
ce_switchport:
interface: 10GE1/0/22
mode: trunk
native_vlan: 10
trunk_vlans: 5-10
provider: '{{ cli }}'
- name: Ensure 10GE1/0/22 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
ce_switchport:
interface: 10GE1/0/22
mode: trunk
native_vlan: 10
trunk_vlans: 2-50
provider: '{{ cli }}'
- name: Ensure these VLANs are not being tagged on the trunk
ce_switchport:
interface: 10GE1/0/22
mode: trunk
trunk_vlans: 51-4000
state: absent
provider: '{{ cli }}'
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"access_vlan": "20", "interface": "10GE1/0/22", "mode": "access"}
existing:
description: k/v pairs of existing switchport
returned: always
type: dict
sample: {"access_vlan": "10", "interface": "10GE1/0/22",
"mode": "access", "switchport": "enable"}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict
sample: {"access_vlan": "20", "interface": "10GE1/0/22",
"mode": "access", "switchport": "enable"}
updates:
description: command string sent to the device
returned: always
type: list
sample: ["10GE1/0/22", "port default vlan 20"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec
CE_NC_GET_INTF = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<isL2SwitchPort></isL2SwitchPort>
</interface>
</interfaces>
</ifm>
</filter>
"""
CE_NC_GET_PORT_ATTR = """
<filter type="subtree">
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf>
<ifName>%s</ifName>
<l2Enable></l2Enable>
<l2Attribute>
<linkType></linkType>
<pvid></pvid>
<trunkVlans></trunkVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</filter>
"""
CE_NC_SET_ACCESS_PORT = """
<config>
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>access</linkType>
<pvid>%s</pvid>
<trunkVlans></trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</config>
"""
CE_NC_SET_TRUNK_PORT_MODE = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_TRUNK_PORT_PVID = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
<pvid>%s</pvid>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_TRUNK_PORT_VLANS = """
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>trunk</linkType>
<trunkVlans>%s:%s</trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
"""
CE_NC_SET_DEFAULT_PORT = """
<config>
<ethernet xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ethernetIfs>
<ethernetIf operation="merge">
<ifName>%s</ifName>
<l2Attribute>
<linkType>access</linkType>
<pvid>1</pvid>
<trunkVlans></trunkVlans>
<untagVlans></untagVlans>
</l2Attribute>
</ethernetIf>
</ethernetIfs>
</ethernet>
</config>
"""
SWITCH_PORT_TYPE = ('ge', '10ge', '25ge',
'4x10ge', '40ge', '100ge', 'eth-trunk')
def get_interface_type(interface):
"""Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF..."""
if interface is None:
return None
iftype = None
if interface.upper().startswith('GE'):
iftype = 'ge'
elif interface.upper().startswith('10GE'):
iftype = '10ge'
elif interface.upper().startswith('25GE'):
iftype = '25ge'
elif interface.upper().startswith('4X10GE'):
iftype = '4x10ge'
elif interface.upper().startswith('40GE'):
iftype = '40ge'
elif interface.upper().startswith('100GE'):
iftype = '100ge'
elif interface.upper().startswith('VLANIF'):
iftype = 'vlanif'
elif interface.upper().startswith('LOOPBACK'):
iftype = 'loopback'
elif interface.upper().startswith('METH'):
iftype = 'meth'
elif interface.upper().startswith('ETH-TRUNK'):
iftype = 'eth-trunk'
elif interface.upper().startswith('VBDIF'):
iftype = 'vbdif'
elif interface.upper().startswith('NVE'):
iftype = 'nve'
elif interface.upper().startswith('TUNNEL'):
iftype = 'tunnel'
elif interface.upper().startswith('ETHERNET'):
iftype = 'ethernet'
elif interface.upper().startswith('FCOE-PORT'):
iftype = 'fcoe-port'
elif interface.upper().startswith('FABRIC-PORT'):
iftype = 'fabric-port'
elif interface.upper().startswith('STACK-PORT'):
iftype = 'stack-port'
elif interface.upper().startswith('NULL'):
iftype = 'null'
else:
return None
return iftype.lower()
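# A hedged usage sketch (illustrative interface names):
#   get_interface_type('40GE1/0/22')  ->  '40ge'
#   get_interface_type('Eth-Trunk10') ->  'eth-trunk'
#   get_interface_type('Serial0')     ->  None  (unknown prefix)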
def is_portswitch_enabled(iftype):
"""Check whether the interface type supports the '[undo] portswitch' command."""
return bool(iftype in SWITCH_PORT_TYPE)
def vlan_bitmap_undo(bitmap):
"""convert vlan bitmap to undo bitmap"""
vlan_bit = ['F'] * 1024
if not bitmap or len(bitmap) == 0:
return ''.join(vlan_bit)
bit_len = len(bitmap)
for num in range(bit_len):
undo = (~int(bitmap[num], 16)) & 0xF
vlan_bit[num] = hex(undo)[2]
return ''.join(vlan_bit)
def is_vlan_bitmap_empty(bitmap):
"""check vlan bitmap empty"""
if not bitmap or len(bitmap) == 0:
return True
bit_len = len(bitmap)
for num in range(bit_len):
if bitmap[num] != '0':
return False
return True
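# A hedged sketch of the bitmap encoding: VLANs are tracked as a
# 1024-character hex string, four VLAN IDs per hex digit (VLAN n maps to bit
# 0x8 >> (n % 4) of digit n / 4). A map whose first digit is '4' therefore
# marks only VLAN 1; vlan_bitmap_undo() inverts every bit, and
# is_vlan_bitmap_empty() is True only for an all-'0' (or empty) map.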
class SwitchPort(object):
"""
Manages Layer 2 switchport interfaces.
"""
def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
# interface and vlan info
self.interface = self.module.params['interface']
self.mode = self.module.params['mode']
self.state = self.module.params['state']
self.access_vlan = self.module.params['access_vlan']
self.native_vlan = self.module.params['native_vlan']
self.trunk_vlans = self.module.params['trunk_vlans']
# host info
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
self.intf_info = dict() # interface vlan info
self.intf_type = None # loopback tunnel ...
def init_module(self):
""" init module """
required_if = [('state', 'absent', ['mode']), ('state', 'present', ['mode'])]
self.module = AnsibleModule(
argument_spec=self.spec, required_if=required_if, supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def get_interface_dict(self, ifname):
""" get one interface attributes dict."""
intf_info = dict()
conf_str = CE_NC_GET_PORT_ATTR % ifname
rcv_xml = get_nc_config(self.module, conf_str)
if "<data/>" in rcv_xml:
return intf_info
intf = re.findall(
r'.*<ifName>(.*)</ifName>.*\s*<l2Enable>(.*)</l2Enable>.*', rcv_xml)
if intf:
intf_info = dict(ifName=intf[0][0],
l2Enable=intf[0][1],
linkType="",
pvid="",
trunkVlans="")
if intf_info["l2Enable"] == "enable":
attr = re.findall(
r'.*<linkType>(.*)</linkType>.*.*\s*<pvid>(.*)'
r'</pvid>.*\s*<trunkVlans>(.*)</trunkVlans>.*', rcv_xml)
if attr:
intf_info["linkType"] = attr[0][0]
intf_info["pvid"] = attr[0][1]
intf_info["trunkVlans"] = attr[0][2]
return intf_info
def is_l2switchport(self):
"""Check layer2 switch port"""
return bool(self.intf_info["l2Enable"] == "enable")
def merge_access_vlan(self, ifname, access_vlan):
"""Merge access interface vlan"""
change = False
conf_str = ""
self.updates_cmd.append("interface %s" % ifname)
if self.state == "present":
if self.intf_info["linkType"] == "access":
if access_vlan and self.intf_info["pvid"] != access_vlan:
self.updates_cmd.append(
"port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, access_vlan)
change = True
else: # not access
self.updates_cmd.append("port link-type access")
if access_vlan:
self.updates_cmd.append(
"port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, access_vlan)
else:
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
elif self.state == "absent":
if self.intf_info["linkType"] == "access":
if access_vlan and self.intf_info["pvid"] == access_vlan and access_vlan != "1":
self.updates_cmd.append(
"undo port default vlan %s" % access_vlan)
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
else: # not access
self.updates_cmd.append("port link-type access")
conf_str = CE_NC_SET_ACCESS_PORT % (ifname, "1")
change = True
if not change:
self.updates_cmd.pop() # remove interface
return
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "MERGE_ACCESS_PORT")
self.changed = True
def merge_trunk_vlan(self, ifname, native_vlan, trunk_vlans):
"""Merge trunk interface vlan"""
change = False
xmlstr = ""
self.updates_cmd.append("interface %s" % ifname)
if trunk_vlans:
vlan_list = self.vlan_range_to_list(trunk_vlans)
vlan_map = self.vlan_list_to_bitmap(vlan_list)
if self.state == "present":
if self.intf_info["linkType"] == "trunk":
if native_vlan and self.intf_info["pvid"] != native_vlan:
self.updates_cmd.append(
"port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, native_vlan)
change = True
if trunk_vlans:
add_vlans = self.vlan_bitmap_add(
self.intf_info["trunkVlans"], vlan_map)
if not is_vlan_bitmap_empty(add_vlans):
self.updates_cmd.append(
"port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, add_vlans, add_vlans)
change = True
else: # not trunk
self.updates_cmd.append("port link-type trunk")
change = True
if native_vlan:
self.updates_cmd.append(
"port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, native_vlan)
if trunk_vlans:
self.updates_cmd.append(
"port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, vlan_map, vlan_map)
if not native_vlan and not trunk_vlans:
xmlstr += CE_NC_SET_TRUNK_PORT_MODE % ifname
self.updates_cmd.append(
"undo port trunk allow-pass vlan 1")
elif self.state == "absent":
if self.intf_info["linkType"] == "trunk":
if native_vlan and self.intf_info["pvid"] == native_vlan and native_vlan != '1':
self.updates_cmd.append(
"undo port trunk pvid vlan %s" % native_vlan)
xmlstr += CE_NC_SET_TRUNK_PORT_PVID % (ifname, 1)
change = True
if trunk_vlans:
del_vlans = self.vlan_bitmap_del(
self.intf_info["trunkVlans"], vlan_map)
if not is_vlan_bitmap_empty(del_vlans):
self.updates_cmd.append(
"undo port trunk allow-pass %s"
% trunk_vlans.replace(',', ' ').replace('-', ' to '))
undo_map = vlan_bitmap_undo(del_vlans)
xmlstr += CE_NC_SET_TRUNK_PORT_VLANS % (
ifname, undo_map, del_vlans)
change = True
else: # not trunk
self.updates_cmd.append("port link-type trunk")
self.updates_cmd.append("undo port trunk allow-pass vlan 1")
xmlstr += CE_NC_SET_TRUNK_PORT_MODE % ifname
change = True
if not change:
self.updates_cmd.pop()
return
conf_str = "<config>" + xmlstr + "</config>"
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "MERGE_TRUNK_PORT")
self.changed = True
def default_switchport(self, ifname):
"""Set interface default or unconfigured"""
change = False
if self.intf_info["linkType"] != "access":
self.updates_cmd.append("interface %s" % ifname)
self.updates_cmd.append("port link-type access")
self.updates_cmd.append("port default vlan 1")
change = True
else:
if self.intf_info["pvid"] != "1":
self.updates_cmd.append("interface %s" % ifname)
self.updates_cmd.append("port default vlan 1")
change = True
if not change:
return
conf_str = CE_NC_SET_DEFAULT_PORT % ifname
rcv_xml = set_nc_config(self.module, conf_str)
self.check_response(rcv_xml, "DEFAULT_INTF_VLAN")
self.changed = True
def vlan_series(self, vlanid_s):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_s)
if peerlistlen != 2:
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
for num in range(peerlistlen):
if not vlanid_s[num].isdigit():
self.module.fail_json(
msg='Error: Format of vlanid is invalid.')
if int(vlanid_s[0]) > int(vlanid_s[1]):
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
elif int(vlanid_s[0]) == int(vlanid_s[1]):
vlan_list.append(str(vlanid_s[0]))
return vlan_list
for num in range(int(vlanid_s[0]), int(vlanid_s[1])):
vlan_list.append(str(num))
vlan_list.append(vlanid_s[1])
return vlan_list
def vlan_region(self, vlanid_list):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_list)
for num in range(peerlistlen):
if vlanid_list[num].isdigit():
vlan_list.append(vlanid_list[num])
else:
vlan_s = self.vlan_series(vlanid_list[num].split('-'))
vlan_list.extend(vlan_s)
return vlan_list
def vlan_range_to_list(self, vlan_range):
""" convert vlan range to vlan list """
vlan_list = self.vlan_region(vlan_range.split(','))
return vlan_list
def vlan_list_to_bitmap(self, vlanlist):
""" convert vlan list to vlan bitmap """
vlan_bit = ['0'] * 1024
bit_int = [0] * 1024
vlan_list_len = len(vlanlist)
for num in range(vlan_list_len):
tagged_vlans = int(vlanlist[num])
if tagged_vlans <= 0 or tagged_vlans > 4094:
self.module.fail_json(
msg='Error: Vlan id is not in the range from 1 to 4094.')
j = tagged_vlans / 4
bit_int[j] |= 0x8 >> (tagged_vlans % 4)
vlan_bit[j] = hex(bit_int[j])[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
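# A hedged sketch (illustrative VLAN IDs): vlan_list_to_bitmap(['1', '2'])
# sets bits 0x4 and 0x2 of the first hex digit, yielding '6' followed by
# 1023 '0' characters; VLAN 0 or VLANs above 4094 trigger fail_json().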
def vlan_bitmap_add(self, oldmap, newmap):
"""vlan add bitmap"""
vlan_bit = ['0'] * 1024
if len(newmap) != 1024:
self.module.fail_json(msg='Error: New vlan bitmap is invalid.')
if len(oldmap) != 1024 and len(oldmap) != 0:
self.module.fail_json(msg='Error: old vlan bitmap is invalid.')
if len(oldmap) == 0:
return newmap
for num in range(1024):
new_tmp = int(newmap[num], 16)
old_tmp = int(oldmap[num], 16)
add = (~(new_tmp & old_tmp)) & new_tmp
vlan_bit[num] = hex(add)[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def vlan_bitmap_del(self, oldmap, delmap):
"""vlan del bitmap"""
vlan_bit = ['0'] * 1024
if not oldmap or len(oldmap) == 0:
return ''.join(vlan_bit)
if len(oldmap) != 1024 or len(delmap) != 1024:
self.module.fail_json(msg='Error: vlan bitmap is invalid.')
for num in range(1024):
tmp = int(delmap[num], 16) & int(oldmap[num], 16)
vlan_bit[num] = hex(tmp)[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def check_params(self):
"""Check all input params"""
# interface type check
if self.interface:
self.intf_type = get_interface_type(self.interface)
if not self.intf_type:
self.module.fail_json(
msg='Error: Interface name of %s is error.' % self.interface)
if not self.intf_type or not is_portswitch_enabled(self.intf_type):
self.module.fail_json(msg='Error: Interface %s is not a switch port type.' % self.interface)
# check access_vlan
if self.access_vlan:
if not self.access_vlan.isdigit():
self.module.fail_json(msg='Error: Access vlan id is invalid.')
if int(self.access_vlan) <= 0 or int(self.access_vlan) > 4094:
self.module.fail_json(
msg='Error: Access vlan id is not in the range from 1 to 4094.')
# check native_vlan
if self.native_vlan:
if not self.native_vlan.isdigit():
self.module.fail_json(msg='Error: Native vlan id is invalid.')
if int(self.native_vlan) <= 0 or int(self.native_vlan) > 4094:
self.module.fail_json(
msg='Error: Native vlan id is not in the range from 1 to 4094.')
# get interface info
self.intf_info = self.get_interface_dict(self.interface)
if not self.intf_info:
self.module.fail_json(msg='Error: Interface does not exist.')
if not self.is_l2switchport():
self.module.fail_json(
msg='Error: Interface is not a layer2 switch port.')
def get_proposed(self):
"""get proposed info"""
self.proposed['state'] = self.state
self.proposed['interface'] = self.interface
self.proposed['mode'] = self.mode
self.proposed['access_vlan'] = self.access_vlan
self.proposed['native_vlan'] = self.native_vlan
self.proposed['trunk_vlans'] = self.trunk_vlans
def get_existing(self):
"""get existing info"""
if self.intf_info:
self.existing["interface"] = self.intf_info["ifName"]
self.existing["mode"] = self.intf_info["linkType"]
self.existing["switchport"] = self.intf_info["l2Enable"]
self.existing['access_vlan'] = self.intf_info["pvid"]
self.existing['native_vlan'] = self.intf_info["pvid"]
self.existing['trunk_vlans'] = self.intf_info["trunkVlans"]
def get_end_state(self):
"""get end state info"""
if self.intf_info:
end_info = self.get_interface_dict(self.interface)
if end_info:
self.end_state["interface"] = end_info["ifName"]
self.end_state["mode"] = end_info["linkType"]
self.end_state["switchport"] = end_info["l2Enable"]
self.end_state['access_vlan'] = end_info["pvid"]
self.end_state['native_vlan'] = end_info["pvid"]
self.end_state['trunk_vlans'] = end_info["trunkVlans"]
def work(self):
"""worker"""
self.check_params()
if not self.intf_info:
self.module.fail_json(msg='Error: interface does not exist.')
self.get_existing()
self.get_proposed()
# present or absent
if self.state == "present" or self.state == "absent":
if self.mode == "access":
self.merge_access_vlan(self.interface, self.access_vlan)
elif self.mode == "trunk":
self.merge_trunk_vlan(
self.interface, self.native_vlan, self.trunk_vlans)
# unconfigured
else:
self.default_switchport(self.interface)
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
if self.changed:
self.results['updates'] = self.updates_cmd
else:
self.results['updates'] = list()
self.module.exit_json(**self.results)
def main():
"""Module main"""
argument_spec = dict(
interface=dict(required=True, type='str'),
mode=dict(choices=['access', 'trunk'], required=False),
access_vlan=dict(type='str', required=False),
native_vlan=dict(type='str', required=False),
trunk_vlans=dict(type='str', required=False),
state=dict(choices=['absent', 'present', 'unconfigured'],
default='present')
)
argument_spec.update(ce_argument_spec)
switchport = SwitchPort(argument_spec)
switchport.work()
if __name__ == '__main__':
main()
| gpl-3.0 |
batxes/4Cin | SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models47488.py | 2 | 17589 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((-1417.69, 7999.35, 7862.76), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((-255.789, 8001.16, 6498.04), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((1043.94, 6568.7, 6558.27), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((-1105.23, 6466.69, 7375.34), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((-941.162, 5006.39, 7850.1), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((1250.29, 4004.55, 7630.89), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((2884.02, 3624.42, 8062.42), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2110.46, 3500.1, 8396.99), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((4715.72, 3921.63, 8075.45), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((5603.13, 4104.1, 9479.44), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((7067.6, 4794.48, 8494.14), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((6688.59, 5676.66, 7930.28), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((6795.76, 7228.32, 7499.21), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((5634.72, 7607.97, 8385.17), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((5975.56, 9776.47, 8813.62), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((5649.95, 12276.5, 6970.73), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((5172.84, 11330.3, 5316.72), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((6551.64, 10831.2, 5332.18), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((7004.08, 9405.61, 6160.73), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((8213.17, 8881.32, 6942.09), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((7202.72, 6696.88, 6542.3), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((7962.53, 8540.03, 6089.45), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((8096.11, 8006.02, 4986.79), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((8011.78, 9155.23, 4371.13), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((6949.51, 10034.4, 4743.02), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6646.02, 11608.3, 4814.11), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((7058.86, 10192.8, 5233.91), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((6239.48, 8380.45, 6155.62), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((7425.37, 7808.2, 5585.9), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((7734.98, 6616.85, 5947.09), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((7707.83, 6311.34, 5104.46), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((7397.39, 5899.65, 6742.1), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((8786.72, 6445.96, 5776.58), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((8002.09, 7254.54, 4818.09), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((8091.62, 8572.28, 5129.98), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((8481.41, 9896.28, 5385.73), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((7830, 7510.32, 5729.58), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((8838.78, 8494.92, 4532.17), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((7846.44, 8018.73, 4088.37), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((9360.31, 7795.61, 4522.41), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((8797.64, 6169.54, 4562.66), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((9629.03, 4740.16, 5396.91), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((11563.6, 4238.41, 3551.8), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((10427, 3109.14, 4639.27), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((9909.56, 4653.57, 4218.64), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((8122.37, 4792.28, 4962.55), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((7840.32, 4749.12, 3040.13), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((9854.48, 4998.02, 2711.31), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((8132.93, 4317.58, 2877.28), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((6638.64, 3828.94, 3974.42), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((7608.62, 2749.54, 3802.01), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((6754.89, 3424.53, 5098.69), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((5639.85, 4421.52, 6128.29), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((5360.29, 3177.05, 7008.75), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((5545.02, 2580.77, 6606.94), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((5877.52, 3639.15, 4788.19), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((3879.74, 3905.18, 3928.14), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2837.81, 4577.98, 1724.25), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((2662.59, 4759.64, 1181.39), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((1989.69, 4429.33, 1533.68), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((2989.63, 4262.46, 1724.71), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((2622.24, 3482.56, 1346.33), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((3423.06, 4206.64, 2896.65), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((2220.22, 3217.21, 1844.37), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((1013.07, 2718.64, 254.321), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((2635.84, 1944.75, 412.557), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((2741.68, 2678.39, -1078.41), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((3156.76, 3007.48, 1318.91), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((1418.87, 3169.47, 203.221), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((2004.23, 2953.28, -1245.3), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((3098.33, 3965.17, -962.568), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
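# Editor's note -- a compact, loop-based equivalent of the repeated blocks
# above, shown for readability only (the generated script deliberately
# unrolls this). `particles` is assumed to be a list of (xyz, rgb, radius)
# tuples mirroring the place_marker calls.
def _place_particles(particles):
    for i, (xyz, rgb, radius) in enumerate(particles):
        name = "particle_%d geometry" % i
        if name not in marker_sets:
            marker_sets[name] = new_marker_set(name)
        marker_sets[name].place_marker(xyz, rgb, radius)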
| gpl-3.0 |
craigderington/studentloan5 | studentloan5/Lib/encodings/cp874.py | 272 | 12595 | """ Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp874',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\ufffe' # 0x82 -> UNDEFINED
'\ufffe' # 0x83 -> UNDEFINED
'\ufffe' # 0x84 -> UNDEFINED
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\ufffe' # 0x86 -> UNDEFINED
'\ufffe' # 0x87 -> UNDEFINED
'\ufffe' # 0x88 -> UNDEFINED
'\ufffe' # 0x89 -> UNDEFINED
'\ufffe' # 0x8A -> UNDEFINED
'\ufffe' # 0x8B -> UNDEFINED
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\ufffe' # 0x99 -> UNDEFINED
'\ufffe' # 0x9A -> UNDEFINED
'\ufffe' # 0x9B -> UNDEFINED
'\ufffe' # 0x9C -> UNDEFINED
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
'\u0e24' # 0xC4 -> THAI CHARACTER RU
'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
'\u0e26' # 0xC6 -> THAI CHARACTER LU
'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
'\ufffe' # 0xDB -> UNDEFINED
'\ufffe' # 0xDC -> UNDEFINED
'\ufffe' # 0xDD -> UNDEFINED
'\ufffe' # 0xDE -> UNDEFINED
'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
'\u0e50' # 0xF0 -> THAI DIGIT ZERO
'\u0e51' # 0xF1 -> THAI DIGIT ONE
'\u0e52' # 0xF2 -> THAI DIGIT TWO
'\u0e53' # 0xF3 -> THAI DIGIT THREE
'\u0e54' # 0xF4 -> THAI DIGIT FOUR
'\u0e55' # 0xF5 -> THAI DIGIT FIVE
'\u0e56' # 0xF6 -> THAI DIGIT SIX
'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
'\u0e59' # 0xF9 -> THAI DIGIT NINE
'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
'\ufffe' # 0xFC -> UNDEFINED
'\ufffe' # 0xFD -> UNDEFINED
'\ufffe' # 0xFE -> UNDEFINED
'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
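# Editor's sketch -- an illustrative round trip through the tables above.
# The sample string (Thai "ไทย") and strict error handling are assumptions.
def _demo_roundtrip(text=u'\u0e44\u0e17\u0e22'):
    encoded = codecs.charmap_encode(text, 'strict', encoding_table)[0]
    decoded = codecs.charmap_decode(encoded, 'strict', decoding_table)[0]
    assert decoded == text
    return encoded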
| bsd-3-clause |
pchauncey/ansible | lib/ansible/modules/cloud/rackspace/rax_files.py | 33 | 11750 | #!/usr/bin/python
# (c) 2013, Paul Durivage <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_files
short_description: Manipulate Rackspace Cloud Files Containers
description:
- Manipulate Rackspace Cloud Files Containers
version_added: "1.5"
options:
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing containers.
Selecting this option is only appropriate when setting type=meta
choices:
- "yes"
- "no"
default: "no"
container:
description:
- The container to use for container or metadata operations.
required: true
meta:
description:
- A hash of items to set as metadata values on a container
private:
description:
- Used to set a container as private, removing it from the CDN. B(Warning!)
Private containers, if previously made public, can have live objects
available until the TTL on cached objects expires
public:
description:
- Used to set a container as public, available via the Cloud Files CDN
region:
description:
- Region to create an instance in
default: DFW
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
ttl:
description:
- In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
Setting a TTL is only appropriate for containers that are public
type:
description:
- Type of object to do work on, i.e. metadata object or a container object
choices:
- file
- meta
default: file
web_error:
description:
- Sets an object to be presented as the HTTP error page when accessed by the CDN URL
web_index:
description:
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL
author: "Paul Durivage (@angstwad)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: "Test Cloud Files Containers"
hosts: local
gather_facts: no
tasks:
- name: "List all containers"
rax_files:
state: list
- name: "Create container called 'mycontainer'"
rax_files:
container: mycontainer
- name: "Create container 'mycontainer2' with metadata"
rax_files:
container: mycontainer2
meta:
key: value
file_for: user@example.com
- name: "Set a container's web index page"
rax_files:
container: mycontainer
web_index: index.html
- name: "Set a container's web error page"
rax_files:
container: mycontainer
web_error: error.html
- name: "Make container public"
rax_files:
container: mycontainer
public: yes
- name: "Make container public with a 24 hour TTL"
rax_files:
container: mycontainer
public: yes
ttl: 86400
- name: "Make container private"
rax_files:
container: mycontainer
private: yes
- name: "Test Cloud Files Containers Metadata Storage"
hosts: local
gather_facts: no
tasks:
- name: "Get mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
- name: "Set mycontainer2 metadata"
rax_files:
container: mycontainer2
type: meta
meta:
uploaded_by: user@example.com
- name: "Remove mycontainer2 metadata"
rax_files:
container: "mycontainer2"
type: meta
state: absent
meta:
key: ""
file_for: ""
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError as e:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import rax_argument_spec, rax_required_together, setup_rax_module
EXIT_DICT = dict(success=True)
META_PREFIX = 'x-container-meta-'
def _get_container(module, cf, container):
try:
return cf.get_container(container)
except pyrax.exc.NoSuchContainer as e:
module.fail_json(msg=e.message)
def _fetch_meta(module, container):
EXIT_DICT['meta'] = dict()
try:
for k, v in container.get_metadata().items():
split_key = k.split(META_PREFIX)[-1]
EXIT_DICT['meta'][split_key] = v
except Exception as e:
module.fail_json(msg=e.message)
def meta(cf, module, container_, state, meta_, clear_meta):
c = _get_container(module, cf, container_)
if meta_ and state == 'present':
try:
meta_set = c.set_metadata(meta_, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
elif meta_ and state == 'absent':
remove_results = []
for k, v in meta_.items():
c.remove_metadata_key(k)
remove_results.append(k)
EXIT_DICT['deleted_meta_keys'] = remove_results
elif state == 'absent':
remove_results = []
for k, v in c.get_metadata().items():
c.remove_metadata_key(k)
remove_results.append(k)
EXIT_DICT['deleted_meta_keys'] = remove_results
_fetch_meta(module, c)
_locals = locals().keys()
EXIT_DICT['container'] = c.name
if 'meta_set' in _locals or 'remove_results' in _locals:
EXIT_DICT['changed'] = True
module.exit_json(**EXIT_DICT)
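# Editor's sketch of the prefix stripping performed by _fetch_meta above;
# standalone and illustrative, with hypothetical sample data.
def _strip_meta_prefix(raw_meta, prefix=META_PREFIX):
    return dict((k.split(prefix)[-1], v) for k, v in raw_meta.items())
# e.g. _strip_meta_prefix({'x-container-meta-owner': 'ops'}) == {'owner': 'ops'}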
def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
private, web_index, web_error):
if public and private:
module.fail_json(msg='container cannot be simultaneously '
'set to public and private')
if state == 'absent' and (meta_ or clear_meta or public or private or web_index or web_error):
module.fail_json(msg='state cannot be omitted when setting/removing '
'attributes on a container')
if state == 'list':
# We don't care if attributes are specified, let's list containers
EXIT_DICT['containers'] = cf.list_containers()
module.exit_json(**EXIT_DICT)
try:
c = cf.get_container(container_)
except pyrax.exc.NoSuchContainer as e:
# Make the container if state=present, otherwise bomb out
if state == 'present':
try:
c = cf.create_container(container_)
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['changed'] = True
EXIT_DICT['created'] = True
else:
module.fail_json(msg=e.message)
else:
# Successfully grabbed a container object
# Delete if state is absent
if state == 'absent':
try:
cont_deleted = c.delete()
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['deleted'] = True
if meta_:
try:
meta_set = c.set_metadata(meta_, clear=clear_meta)
except Exception as e:
module.fail_json(msg=e.message)
finally:
_fetch_meta(module, c)
if ttl:
try:
c.cdn_ttl = ttl
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['ttl'] = c.cdn_ttl
if public:
try:
cont_public = c.make_public()
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['container_urls'] = dict(url=c.cdn_uri,
ssl_url=c.cdn_ssl_uri,
streaming_url=c.cdn_streaming_uri,
ios_uri=c.cdn_ios_uri)
if private:
try:
cont_private = c.make_private()
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_private'] = True
if web_index:
try:
cont_web_index = c.set_web_index_page(web_index)
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_index'] = True
finally:
_fetch_meta(module, c)
if web_error:
try:
cont_err_index = c.set_web_error_page(web_error)
except Exception as e:
module.fail_json(msg=e.message)
else:
EXIT_DICT['set_error'] = True
finally:
_fetch_meta(module, c)
EXIT_DICT['container'] = c.name
EXIT_DICT['objs_in_container'] = c.object_count
EXIT_DICT['total_bytes'] = c.total_bytes
_locals = locals().keys()
if ('cont_deleted' in _locals
or 'meta_set' in _locals
or 'cont_public' in _locals
or 'cont_private' in _locals
or 'cont_web_index' in _locals
or 'cont_err_index' in _locals):
EXIT_DICT['changed'] = True
module.exit_json(**EXIT_DICT)
def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
private, web_index, web_error):
""" Dispatch from here to work with metadata or file objects """
cf = pyrax.cloudfiles
if cf is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if typ == "container":
container(cf, module, container_, state, meta_, clear_meta, ttl,
public, private, web_index, web_error)
else:
meta(cf, module, container_, state, meta_, clear_meta)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
container=dict(),
state=dict(choices=['present', 'absent', 'list'],
default='present'),
meta=dict(type='dict', default=dict()),
clear_meta=dict(default=False, type='bool'),
type=dict(choices=['container', 'meta'], default='container'),
ttl=dict(type='int'),
public=dict(default=False, type='bool'),
private=dict(default=False, type='bool'),
web_index=dict(),
web_error=dict()
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
container_ = module.params.get('container')
state = module.params.get('state')
meta_ = module.params.get('meta')
clear_meta = module.params.get('clear_meta')
typ = module.params.get('type')
ttl = module.params.get('ttl')
public = module.params.get('public')
private = module.params.get('private')
web_index = module.params.get('web_index')
web_error = module.params.get('web_error')
if state in ['present', 'absent'] and not container_:
module.fail_json(msg='please specify a container name')
if clear_meta and typ != 'meta':
module.fail_json(msg='clear_meta can only be used when setting '
'metadata')
setup_rax_module(module, pyrax)
cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
private, web_index, web_error)
if __name__ == '__main__':
main()
| gpl-3.0 |
wanghao524151/scrapy_joy | open_insurance/models.py | 1 | 2916 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.db import models
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.db.models.signals import pre_save
from scrapy.contrib.djangoitem import DjangoItem
from dynamic_scraper.models import Scraper, SchedulerRuntime
class InsuranceWebsite(models.Model):
site = models.CharField(u'平台名称', max_length=20)
name = models.CharField(u'产品名称', max_length=200)
area = models.CharField(u'地域', max_length=20, default='', null=True, blank=True)
category = models.CharField(u'产品类型', max_length=20, default='', null=True, blank=True)
url = models.URLField(u'爬取链接')
scraper = models.ForeignKey(Scraper, verbose_name=u'爬虫', blank=True, null=True, on_delete=models.SET_NULL)
scraper_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
def __unicode__(self):
return self.site + self.name + '[' + str(self.category) + ' ' + str(self.area) + ']'
class Meta:
verbose_name = u'保险平台'
verbose_name_plural = u'保险平台'
class Insurance(models.Model):
TERM_UNIT_CHOICES = (
('day', u'天'),
('month', u'月'),
)
insurance_website = models.ForeignKey(InsuranceWebsite, verbose_name=u'保险平台')
title = models.CharField(u'借款标题', max_length=200)
amount = models.FloatField(u'金额(元)', default=0)
year_rate = models.FloatField(u'年利率%', default=0)
duration = models.IntegerField(u'期限(天)', default=0)
term = models.IntegerField(u'期限', default=0)
term_unit = models.CharField(u'期限单位', max_length=10, choices=TERM_UNIT_CHOICES)
url = models.URLField(u'链接地址', blank=True)
checker_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
created = models.DateTimeField(u'录入时间', auto_now_add=True)
def __unicode__(self):
return self.title
class Meta:
verbose_name = u'保险'
verbose_name_plural = u'保险'
class InsuranceItem(DjangoItem):
django_model = Insurance
@receiver(pre_delete)
def pre_delete_handler(sender, instance, using, **kwargs):
if isinstance(instance, InsuranceWebsite):
if instance.scraper_runtime:
instance.scraper_runtime.delete()
if isinstance(instance, Insurance):
if instance.checker_runtime:
instance.checker_runtime.delete()
pre_delete.connect(pre_delete_handler)
@receiver(pre_save, sender=Insurance, dispatch_uid='open_insurance.insurance')
def loan_push_product(sender, **kwargs):
instance = kwargs.get('instance')
if instance.term_unit == 'day':
instance.duration = instance.term
elif instance.term_unit == 'month':
instance.duration = int(instance.term)*30 | apache-2.0 |
craftytrickster/servo | tests/wpt/css-tests/tools/pywebsocket/src/test/test_http_header_util.py | 496 | 3372 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for http_header_util module."""
import unittest
from mod_pywebsocket import http_header_util
class UnitTest(unittest.TestCase):
"""A unittest for http_header_util module."""
def test_parse_relative_uri(self):
host, port, resource = http_header_util.parse_uri('/ws/test')
self.assertEqual(None, host)
self.assertEqual(None, port)
self.assertEqual('/ws/test', resource)
def test_parse_absolute_uri(self):
host, port, resource = http_header_util.parse_uri(
'ws://localhost:10080/ws/test')
self.assertEqual('localhost', host)
self.assertEqual(10080, port)
self.assertEqual('/ws/test', resource)
host, port, resource = http_header_util.parse_uri(
'ws://example.com/ws/test')
self.assertEqual('example.com', host)
self.assertEqual(80, port)
self.assertEqual('/ws/test', resource)
host, port, resource = http_header_util.parse_uri(
'wss://example.com/')
self.assertEqual('example.com', host)
self.assertEqual(443, port)
self.assertEqual('/', resource)
host, port, resource = http_header_util.parse_uri(
'ws://example.com:8080')
self.assertEqual('example.com', host)
self.assertEqual(8080, port)
self.assertEqual('/', resource)
def test_parse_invalid_uri(self):
host, port, resource = http_header_util.parse_uri('ws:///')
self.assertEqual(None, resource)
host, port, resource = http_header_util.parse_uri('ws://localhost:')
self.assertEqual(None, resource)
host, port, resource = http_header_util.parse_uri('ws://localhost:/ws')
self.assertEqual(None, resource)
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 |
redhat-openstack/nova | nova/db/sqlalchemy/utils.py | 14 | 5568 | # Copyright (c) 2013 Boris Pavlovic ([email protected]).
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.db import exception as db_exc
from oslo.db.sqlalchemy import utils as oslodbutils
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import MetaData
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy import Table
from sqlalchemy.types import NullType
from nova.db.sqlalchemy import api as db
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class DeleteFromSelect(UpdateBase):
def __init__(self, table, select, column):
self.table = table
self.select = select
self.column = column
# NOTE(guochbo): some versions of MySQL do not yet support subqueries with
# 'LIMIT & IN/ALL/ANY/SOME', so we work around this with a nested select.
@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.column),
element.column.name,
compiler.process(element.select))
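# Editor's sketch -- illustrative use of the workaround compiled above; the
# table, column, and limit are assumptions, not nova code.
def _demo_delete_from_select(conn, table, limit=1000):
    from sqlalchemy.sql import select
    query = select([table.c.id]).limit(limit)
    # Renders as: DELETE FROM <table> WHERE id in (SELECT T1.id FROM (...) as T1)
    return conn.execute(DeleteFromSelect(table, query, table.c.id))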
def check_shadow_table(migrate_engine, table_name):
"""This method checks that table with ``table_name`` and
corresponding shadow table have same columns.
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
autoload=True)
columns = dict([(c.name, c) for c in table.columns])
shadow_columns = dict([(c.name, c) for c in shadow_table.columns])
for name, column in columns.iteritems():
if name not in shadow_columns:
raise exception.NovaException(
_("Missing column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
shadow_column = shadow_columns[name]
if not isinstance(shadow_column.type, type(column.type)):
raise exception.NovaException(
_("Different types in %(table)s.%(column)s and shadow table: "
"%(c_type)s %(shadow_c_type)s")
% {'column': name, 'table': table.name,
'c_type': column.type,
'shadow_c_type': shadow_column.type})
for name, column in shadow_columns.iteritems():
if name not in columns:
raise exception.NovaException(
_("Extra column %(table)s.%(column)s in shadow table")
% {'column': name, 'table': shadow_table.name})
return True
def create_shadow_table(migrate_engine, table_name=None, table=None,
**col_name_col_instance):
"""This method create shadow table for table with name ``table_name``
or table instance ``table``.
:param table_name: Autoload table with this name and create shadow table
:param table: Autoloaded table, so just create corresponding shadow table.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is an instance of Column. These params are required only
for columns whose types are unsupported by sqlite, for example BigInteger.
:returns: The created shadow_table object.
"""
meta = MetaData(bind=migrate_engine)
if table_name is None and table is None:
raise exception.NovaException(_("Specify `table_name` or `table` "
"param"))
if not (table_name is None or table is None):
raise exception.NovaException(_("Specify only one param `table_name` "
"`table`"))
if table is None:
table = Table(table_name, meta, autoload=True)
columns = []
for column in table.columns:
if isinstance(column.type, NullType):
new_column = oslodbutils._get_not_supported_column(
col_name_col_instance, column.name)
columns.append(new_column)
else:
columns.append(column.copy())
shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
shadow_table = Table(shadow_table_name, meta, *columns,
mysql_engine='InnoDB')
try:
shadow_table.create()
return shadow_table
except (db_exc.DBError, OperationalError):
# NOTE(ekudryashova): At the moment there is a case in oslo.db code,
# which raises unwrapped OperationalError, so we should catch it until
# oslo.db would wraps all such exceptions
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
raise exception.ShadowTableExists(name=shadow_table_name)
except Exception:
LOG.info(repr(shadow_table))
LOG.exception(_('Exception while creating table.'))
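# Editor's sketch -- an illustrative call pattern only; the engine URL and
# table name are assumptions (a real deployment passes the migrate engine).
def _demo_create_shadow(url='sqlite://', table_name='instances'):
    from sqlalchemy import create_engine
    engine = create_engine(url)
    return create_shadow_table(engine, table_name=table_name)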
| apache-2.0 |
krieger-od/nwjs_chromium.src | tools/telemetry/telemetry/timeline/inspector_importer.py | 12 | 2626 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Imports event data obtained from the inspector's timeline.'''
import telemetry.timeline.slice as tracing_slice
import telemetry.timeline.thread as timeline_thread
from telemetry.timeline import importer
from telemetry.timeline import trace_data as trace_data_module
class InspectorTimelineImporter(importer.TimelineImporter):
def __init__(self, model, trace_data):
super(InspectorTimelineImporter, self).__init__(model,
trace_data,
import_order=1)
self._events = trace_data.GetEventsFor(
trace_data_module.INSPECTOR_TRACE_PART)
@staticmethod
def GetSupportedPart():
return trace_data_module.INSPECTOR_TRACE_PART
def ImportEvents(self):
render_process = self._model.GetOrCreateProcess(0)
for raw_event in self._events:
thread = render_process.GetOrCreateThread(raw_event.get('thread', 0))
InspectorTimelineImporter.AddRawEventToThreadRecursive(thread, raw_event)
def FinalizeImport(self):
pass
@staticmethod
def AddRawEventToThreadRecursive(thread, raw_inspector_event):
pending_slice = None
if ('startTime' in raw_inspector_event and
'type' in raw_inspector_event):
args = {}
for x in raw_inspector_event:
if x in ('startTime', 'endTime', 'children'):
continue
args[x] = raw_inspector_event[x]
if len(args) == 0:
args = None
start_time = raw_inspector_event['startTime']
end_time = raw_inspector_event.get('endTime', start_time)
pending_slice = tracing_slice.Slice(
thread, 'inspector',
raw_inspector_event['type'],
start_time,
thread_timestamp=None,
args=args)
for child in raw_inspector_event.get('children', []):
InspectorTimelineImporter.AddRawEventToThreadRecursive(
thread, child)
if pending_slice:
pending_slice.duration = end_time - pending_slice.start
thread.PushSlice(pending_slice)
@staticmethod
def RawEventToTimelineEvent(raw_inspector_event):
"""Converts raw_inspector_event to TimelineEvent."""
thread = timeline_thread.Thread(None, 0)
InspectorTimelineImporter.AddRawEventToThreadRecursive(
thread, raw_inspector_event)
thread.FinalizeImport()
assert len(thread.toplevel_slices) <= 1
if len(thread.toplevel_slices) == 0:
return None
return thread.toplevel_slices[0]
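# Editor's addition -- the raw inspector event shape consumed above. Field
# names come from the importer; the values and nesting are illustrative.
_EXAMPLE_RAW_EVENT = {
    'type': 'FunctionCall',
    'thread': 0,
    'startTime': 10.0,
    'endTime': 12.5,
    'children': [
        {'type': 'GCEvent', 'startTime': 10.5, 'endTime': 11.0},
    ],
}
# RawEventToTimelineEvent(_EXAMPLE_RAW_EVENT) would yield a 2.5-unit
# top-level 'FunctionCall' slice containing one nested 'GCEvent' slice.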
| bsd-3-clause |
avedaee/DIRAC | WorkloadManagementSystem/Service/OptimizationMindHandler.py | 1 | 9803 | import types
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DEncode, ThreadScheduler
from DIRAC.Core.Security import Properties
from DIRAC.Core.Base.ExecutorMindHandler import ExecutorMindHandler
from DIRAC.WorkloadManagementSystem.Client.JobState.JobState import JobState
from DIRAC.WorkloadManagementSystem.Client.JobState.CachedJobState import CachedJobState
from DIRAC.WorkloadManagementSystem.Client.JobState.OptimizationTask import OptimizationTask
class OptimizationMindHandler( ExecutorMindHandler ):
__jobDB = False
__optimizationStates = [ 'Received', 'Checking' ]
__loadTaskId = False
MSG_DEFINITIONS = { 'OptimizeJobs' : { 'jids' : ( types.ListType, types.TupleType ) } }
auth_msg_OptimizeJobs = [ 'all' ]
def msg_OptimizeJobs( self, msgObj ):
jids = msgObj.jids
for jid in jids:
try:
jid = int( jid )
except ValueError:
self.log.error( "Job ID %s has to be an integer" % jid )
#Forget and add task to ensure state is reset
self.forgetTask( jid )
result = self.executeTask( jid, OptimizationTask( jid ) )
if not result[ 'OK' ]:
self.log.error( "Could not add job %s to optimization: %s" % ( jid, result[ 'Value' ] ) )
else:
self.log.info( "Received new job %s" % jid )
return S_OK()
@classmethod
def __loadJobs( cls, eTypes = None ):
log = cls.log
if cls.__loadTaskId:
period = cls.srv_getCSOption( "LoadJobPeriod", 300 )
ThreadScheduler.gThreadScheduler.setTaskPeriod( cls.__loadTaskId, period )
if not eTypes:
eConn = cls.getExecutorsConnected()
eTypes = [ eType for eType in eConn if eConn[ eType ] > 0 ]
if not eTypes:
log.info( "No optimizer connected. Skipping load" )
return S_OK()
log.info( "Getting jobs for %s" % ",".join( eTypes ) )
checkingMinors = [ eType.split( "/" )[1] for eType in eTypes if eType != "WorkloadManagement/JobPath" ]
for opState in cls.__optimizationStates:
#For Received states
if opState == "Received":
if 'WorkloadManagement/JobPath' not in eTypes:
continue
jobCond = { 'Status' : opState }
#For checking states
if opState == "Checking":
if not checkingMinors:
continue
jobCond = { 'Status': opState, 'MinorStatus' : checkingMinors }
#Do the magic
jobTypeCondition = cls.srv_getCSOption( "JobTypeRestriction", [] )
if jobTypeCondition:
jobCond[ 'JobType' ] = jobTypeCondition
result = cls.__jobDB.selectJobs( jobCond, limit = cls.srv_getCSOption( "JobQueryLimit", 10000 ) )
if not result[ 'OK' ]:
return result
jidList = result[ 'Value' ]
knownJids = cls.getTaskIds()
added = 0
for jid in jidList:
jid = long( jid )
if jid not in knownJids:
#Same as before. Check that the state is ok.
cls.executeTask( jid, OptimizationTask( jid ) )
added += 1
log.info( "Added %s/%s jobs for %s state" % ( added, len( jidList ), opState ) )
return S_OK()
@classmethod
def initializeHandler( cls, serviceInfoDict ):
try:
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
cls.__jobDB = JobDB()
except Exception, excp:
return S_ERROR( "Could not connect to JobDB: %s" % str( excp ) )
cls.setFailedOnTooFrozen( False )
cls.setFreezeOnFailedDispatch( False )
cls.setFreezeOnUnknownExecutor( False )
cls.setAllowedClients( "JobManager" )
JobState.checkDBAccess()
JobState.cleanTaskQueues()
period = cls.srv_getCSOption( "LoadJobPeriod", 60 )
result = ThreadScheduler.gThreadScheduler.addPeriodicTask( period, cls.__loadJobs )
if not result[ 'OK' ]:
return result
cls.__loadTaskId = result[ 'Value' ]
return cls.__loadJobs()
@classmethod
def exec_executorConnected( cls, trid, eTypes ):
return cls.__loadJobs( eTypes )
@classmethod
def __failJob( cls, jid, minorStatus, appStatus = "" ):
cls.forgetTask( jid )
cls.__jobDB.setJobStatus( jid, "Failed", minorStatus, appStatus )
@classmethod
def __splitJob( cls, jid, manifests ):
cls.log.notice( "Splitting job %s" % jid )
try:
result = cls.__jobDB.insertSplittedManifests( jid, manifests )
if not result[ 'OK' ]:
cls.__failJob( jid, "Error while splitting", result[ 'Message' ] )
return S_ERROR( "Fail splitting" )
for jid in result[ 'Value' ]:
cls.forgetTask( jid )
cls.executeTask( jid, OptimizationTask( jid ) )
except Exception, excp:
cls.log.exception( "While splitting" )
cls.__failJob( jid, "Error while splitting", str( excp ) )
return S_OK()
@classmethod
def exec_taskProcessed( cls, jid, taskObj, eType ):
cjs = taskObj.jobState
cls.log.info( "Saving changes for job %s after %s" % ( jid, eType ) )
result = cjs.commitChanges()
if not result[ 'OK' ]:
cls.log.error( "Could not save changes for job", "%s: %s" % ( jid, result[ 'Message' ] ) )
return result
if taskObj.splitManifests:
return cls.__splitJob( jid, taskObj.splitManifests )
if taskObj.tqReady:
result = cjs.getManifest()
if not result[ 'OK' ]:
cls.log.error( "Could not get manifest before inserting into TQ", "%s: %s" % ( jid, result[ 'Message' ] ) )
return result
manifest = result[ 'Value' ]
result = cjs.jobState.insertIntoTQ( manifest )
if not result[ 'OK' ]:
cls.log.error( "Could not insert into TQ", "%s: %s" % ( jid, result[ 'Message' ] ) )
return result
return S_OK()
@classmethod
def exec_taskFreeze( cls, jid, taskObj, eType ):
jobState = taskObj.jobState
cls.log.info( "Saving changes for job %s before freezing from %s" % ( jid, eType ) )
result = jobState.commitChanges()
if not result[ 'OK' ]:
cls.log.error( "Could not save changes for job", "%s: %s" % ( jid, result[ 'Message' ] ) )
return result
@classmethod
def exec_dispatch( cls, jid, taskObj, pathExecuted ):
jobState = taskObj.jobState
result = jobState.getStatus()
if not result[ 'OK' ]:
cls.log.error( "Could not get status for job", "%s: %s" % ( jid, result[ 'Message' ] ) )
return S_ERROR( "Could not retrieve status: %s" % result[ 'Message' ] )
status, minorStatus = result[ 'Value' ]
#If not in proper state then end chain
if status not in cls.__optimizationStates:
cls.log.info( "Dispatching job %s out of optimization" % jid )
return S_OK()
#If received send to JobPath
if status == "Received":
cls.log.info( "Dispatching job %s to JobPath" % jid )
return S_OK( "WorkloadManagement/JobPath" )
result = jobState.getOptParameter( 'OptimizerChain' )
if not result[ 'OK' ]:
cls.log.error( "Could not get optimizer chain for job, auto resetting job", "%s: %s" % ( jid, result[ 'Message' ] ) )
result = jobState.resetJob()
if not result[ 'OK' ]:
cls.log.error( "Could not reset job", "%s: %s" % ( jid, result[ 'Message' ] ) )
return S_ERROR( "Cound not get OptimizationChain or reset job %s" % jid )
return S_OK( "WorkloadManagement/JobPath" )
optChain = result[ 'Value' ]
if minorStatus not in optChain:
cls.log.error( "Next optimizer is not in the chain for job", "%s: %s not in %s" % ( jid, minorStatus, optChain ) )
return S_ERROR( "Next optimizer %s not in chain %s" % ( minorStatus, optChain ) )
cls.log.info( "Dispatching job %s to %s" % ( jid, minorStatus ) )
return S_OK( "WorkloadManagement/%s" % minorStatus )
@classmethod
def exec_prepareToSend( cls, jid, taskObj, eId ):
return taskObj.jobState.recheckValidity()
@classmethod
def exec_serializeTask( cls, taskObj ):
return S_OK( taskObj.serialize() )
@classmethod
def exec_deserializeTask( cls, taskStub ):
return OptimizationTask.deserialize( taskStub )
@classmethod
def exec_taskError( cls, jid, taskObj, errorMsg ):
result = taskObj.jobState.commitChanges()
if not result[ 'OK' ]:
cls.log.error( "Cannot write changes to job %s: %s" % ( jid, result[ 'Message' ] ) )
jobState = JobState( jid )
result = jobState.getStatus()
if result[ 'OK' ]:
if result[ 'Value' ][0].lower() == "failed":
return S_OK()
else:
cls.log.error( "Could not get status of job %s: %s" % ( jid, result[ 'Message ' ] ) )
cls.log.notice( "Job %s: Setting to Failed|%s" % ( jid, errorMsg ) )
return jobState.setStatus( "Failed", errorMsg, source = 'OptimizationMindHandler' )
auth_stageCallback = [ Properties.OPERATOR ]
types_stageCallback = ( ( types.StringType, types.IntType, types.LongType ), types.StringType )
def export_stageCallback( self, jid, stageStatus ):
""" Simple call back method to be used by the stager. """
try:
jid = int( jid )
except ValueError:
return S_ERROR( "Job ID is not a number!" )
failed = False
if stageStatus == 'Done':
major = 'Checking'
minor = 'InputDataValidation'
elif stageStatus == 'Failed':
major = 'Failed'
minor = 'Staging input files failed'
failed = True
else:
return S_ERROR( "%s status not known." % stageStatus )
result = self.__jobDB.getJobAttributes( jid, ['Status'] )
if not result['OK']:
return result
data = result[ 'Value' ]
if not data:
return S_OK( 'No Matching Job' )
if data[ 'Status' ] != 'Staging':
return S_OK( 'Job %s is not in Staging' % jid )
jobState = JobState( jid )
result = jobState.setStatus( major, minor, source = "StagerSystem" )
if not result[ 'OK' ]:
return result
if failed:
return S_OK()
return self.executeTask( jid, OptimizationTask( jid ) )
| gpl-3.0 |
sunils34/buffer-django-nonrel | django/core/xheaders.py | 518 | 1157 | """
Pages in Django are served up with custom HTTP headers containing useful
information about those pages -- namely, the content type and object ID.
This module contains utility functions for retrieving and doing interesting
things with these special "X-Headers" (so called because the HTTP spec demands
that custom headers are prefixed with "X-").
Next time you're at slashdot.org, watch out for X-Fry and X-Bender. :)
"""
def populate_xheaders(request, response, model, object_id):
"""
Adds the "X-Object-Type" and "X-Object-Id" headers to the given
HttpResponse according to the given model and object_id -- but only if the
given HttpRequest object has an IP address within the INTERNAL_IPS setting
or if the request is from a logged in staff member.
"""
from django.conf import settings
if (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
or (hasattr(request, 'user') and request.user.is_active
and request.user.is_staff)):
response['X-Object-Type'] = "%s.%s" % (model._meta.app_label, model._meta.object_name.lower())
response['X-Object-Id'] = str(object_id)
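
# Illustrative usage sketch (an assumption, not part of the original module):
# a hypothetical Django view attaching the X-Headers to its response. The
# ``Article`` model and the view body are placeholders for demonstration only.
#
#     from django.http import HttpResponse
#     from django.core.xheaders import populate_xheaders
#
#     def article_detail(request, object_id):
#         article = Article.objects.get(pk=object_id)
#         response = HttpResponse(article.body)
#         populate_xheaders(request, response, Article, object_id)
#         return response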
| bsd-3-clause |
jannekai/project-euler | 017.py | 1 | 1222 | import time
import math
import sys
start = time.time()
w = dict()
w[1] = "one"
w[2] = "two"
w[3] = "three"
w[4] = "four"
w[5] = "five"
w[6] = "six"
w[7] = "seven"
w[8] = "eight"
w[9] = "nine"
w[10] = "ten"
w[11] = "eleven"
w[12] = "twelve"
w[13] = "thirteen"
w[14] = "fourteen"
w[15] = "fifteen"
w[16] = "sixteen"
w[17] = "seventeen"
w[18] = "eighteen"
w[19] = "nineteen"
w[20] = "twenty"
w[30] = "thirty"
w[40] = "forty"
w[50] = "fifty"
w[60] = "sixty"
w[70] = "seventy"
w[80] = "eighty"
w[90] = "ninety"
def tens(x):
global w
if x%10 == 0:
return w[x//10*10]
else:
return w[x//10*10] + "-" + w[x%10]
def letters(s):
c = 0
for i in range(0, len(s)):
if s[i] != " " and s[i] != "-":
c += 1
return c
n = dict()
c = letters("one thousand")
for i in range(1, 1000):
s = ""
if i < 20:
s += w[i]
elif i < 100:
s += tens(i)
elif i < 1000:
if i%100 == 0:
s += w[i//100] + " hundred "
else:
if i%100 < 20:
s += w[i//100] + " hundred and " + w[i%100]
else:
s += w[i//100] + " hundred and " + tens(i%100)
c += letters(s)
print c
end = time.time() - start
print "Total time was " + str(end)+ " seconds"
| mit |
dreucifer/chargenstart | product/views.py | 1 | 2302 | """ @todo """
from flask import render_template, request, flash, session, Blueprint, abort
from product.models import Product, Category
from cart.forms import AddToCartForm
from product.utils import get_or_404, first_or_abort, get_for_page, get_by_slug
from cart import ShoppingCart, SessionCart
Products = Blueprint('products', __name__, url_prefix='/parts',
template_folder='templates', static_folder='static')
import product.admin
PER_PAGE = 10
@Products.route('/', methods=['POST', 'GET'])
def index():
""" @todo """
cart = ShoppingCart.for_session_cart(request.cart)
categories = Category.query.filter_by(parent=None).all()
return render_template('products.html', cart=cart, categories=categories)
@Products.route('/<category_slug>')
def category(category_slug):
cat = get_by_slug('Category', category_slug)
return render_template('category.html', category=cat)
@Products.route('/catalog/', defaults={'page_number': 1})
@Products.route('/catalog/page/<int:page_number>')
def catalog(page_number):
cat_page = _catalog_page(page_number, per_page=10)
cart = ShoppingCart.for_session_cart(request.cart)
return render_template('catalog.html', cart=cart, catalog_page=cat_page)
@Products.route('/info/<product_id>', methods=['GET', 'POST'])
def details(product_id):
""" @todo """
part = get_or_404(Product, product_id)
cart = ShoppingCart.for_session_cart(request.cart)
form = AddToCartForm(request.form, product=part, cart=cart, product_id=part.id_)
return render_template('details.html', product=part, form=form, cart=cart)
def _catalog_page(page_number, per_page=20):
""" @todo """
pagination = get_for_page(Product.query, page_number, per_page=per_page)
product_listings = [_product_listing(part) for part in pagination.items]
return render_template('_catalog.html',
pagination=pagination,
parts=product_listings)
@Products.app_template_global()
def _product_listing(part):
cart = ShoppingCart.for_session_cart(request.cart)
form = AddToCartForm(request.form, product=part,
cart=cart, product_id=part.id_)
return render_template('_product_listing.html', form=form, product=part,
cart=cart)
| mit |
2hdddg/pyvidstream | test/vidanalyze.py | 1 | 3170 | from unittest import TestCase
from vidutil.vidstream import Frame
import vidutil.vidanalyze as a
def _num_f(num):
f = Frame(type='P', key_frame=False, width=1, height=1,
coded_picture_number=num)
return f
class TestSplitFramesMissing(TestCase):
def test_no_missing(self):
""" Verifies that a list of frames with no missing
frames are not split.
"""
frames = [
_num_f(1),
_num_f(2),
_num_f(3)
]
splitted = a.split_frames_by_missing(frames)
self.assertEqual(len(splitted), 1)
self.assertListEqual(frames, splitted[0])
def test_one_missing(self):
""" Verifies that a list of frames with a missing
frame in the middle are split into two parts.
"""
frames = [
_num_f(1),
_num_f(2),
_num_f(4),
_num_f(5)
]
splitted = a.split_frames_by_missing(frames)
self.assertEqual(len(splitted), 2)
self.assertListEqual(frames[0:2], splitted[0])
self.assertListEqual(frames[2:4], splitted[1])
def test_two_missing(self):
""" Verifies that a list of frames with two missing
frames are split into three parts.
"""
frames = [
_num_f(1),
_num_f(4),
_num_f(5),
_num_f(9),
_num_f(10),
_num_f(11),
]
splitted = a.split_frames_by_missing(frames)
self.assertEqual(len(splitted), 3)
self.assertListEqual(frames[0:1], splitted[0])
self.assertListEqual(frames[1:3], splitted[1])
self.assertListEqual(frames[3:6], splitted[2])
def test_empty(self):
""" Verifies that an empty list is returned
as an empty list.
"""
splitted = a.split_frames_by_missing([])
self.assertEqual(len(splitted), 1)
self.assertListEqual([], splitted[0])
def test_number_out_of_order(self):
""" Test that an exception is thrown if the
numbers are out of order
"""
frames = [
_num_f(2),
_num_f(1)
]
with self.assertRaises(a.FrameSeqException):
a.split_frames_by_missing(frames)
def test_same_number(self):
""" Test that an exception is thrown if same
number occures twice in a row
"""
frames = [
_num_f(2),
_num_f(2)
]
with self.assertRaises(a.FrameSeqException):
a.split_frames_by_missing(frames)
class TestAreFramesMissing(TestCase):
def test_no_missing(self):
""" Tests that False is returned when no
frames are missing
"""
frames = [
_num_f(2),
_num_f(3),
_num_f(4),
]
self.assertFalse(a.are_frames_missing(frames))
def test_missing(self):
""" Tests that True is returned when
frames are missing
"""
frames = [
_num_f(2),
_num_f(4),
_num_f(5),
]
self.assertTrue(a.are_frames_missing(frames))
| mit |
chafique-delli/OpenUpgrade | addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/actions.py | 382 | 3763 | ##########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer [email protected]
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
##############################################################################
import uno
import unohelper
import os
#--------------------------------------------------
# An ActionListener adapter.
# This object implements com.sun.star.awt.XActionListener.
# When actionPerformed is called, this will call an arbitrary
# python procedure, passing it...
# 1. the oActionEvent
# 2. any other parameters you specified to this object's constructor (as a tuple).
if __name__<>"package":
os.system( "ooffice '-accept=socket,host=localhost,port=2002;urp;'" )
passwd=""
database=""
uid=""
loginstatus=False
from com.sun.star.awt import XActionListener
class ActionListenerProcAdapter( unohelper.Base, XActionListener ):
def __init__( self, oProcToCall, tParams=() ):
self.oProcToCall = oProcToCall # a python procedure
self.tParams = tParams # a tuple
# oActionEvent is a com.sun.star.awt.ActionEvent struct.
def actionPerformed( self, oActionEvent ):
if callable( self.oProcToCall ):
apply( self.oProcToCall, (oActionEvent,) + self.tParams )
#--------------------------------------------------
# An ItemListener adapter.
# This object implements com.sun.star.awt.XItemListener.
# When itemStateChanged is called, this will call an arbitrary
# python procedure, passing it...
# 1. the oItemEvent
# 2. any other parameters you specified to this object's constructor (as a tuple).
from com.sun.star.awt import XItemListener
class ItemListenerProcAdapter( unohelper.Base, XItemListener ):
def __init__( self, oProcToCall, tParams=() ):
self.oProcToCall = oProcToCall # a python procedure
self.tParams = tParams # a tuple
# oItemEvent is a com.sun.star.awt.ItemEvent struct.
def itemStateChanged( self, oItemEvent ):
if callable( self.oProcToCall ):
apply( self.oProcToCall, (oItemEvent,) + self.tParams )
#--------------------------------------------------
# An TextListener adapter.
# This object implements com.sun.star.awt.XTextistener.
# When textChanged is called, this will call an arbitrary
# python procedure, passing it...
# 1. the oTextEvent
# 2. any other parameters you specified to this object's constructor (as a tuple).
from com.sun.star.awt import XTextListener
class TextListenerProcAdapter( unohelper.Base, XTextListener ):
def __init__( self, oProcToCall, tParams=() ):
self.oProcToCall = oProcToCall # a python procedure
self.tParams = tParams # a tuple
# oTextEvent is a com.sun.star.awt.TextEvent struct.
def textChanged( self, oTextEvent ):
if callable( self.oProcToCall ):
apply( self.oProcToCall, (oTextEvent,) + self.tParams )
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
9corp/9volt | vendor/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"someday",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
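# Example invocation (see doMain below):
#   python test.py testdata /tmp/golden
# writes <i>.msgpack.golden and <i>.cbor.golden files into /tmp/golden.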
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| mit |
SjorsVanGelderen/Graduation | python_3/data_structures/heap.py | 1 | 5731 | """Heap data structure example
Copyright 2017, Sjors van Gelderen
"""
import random
"""Heap data structure
The 'property' attribute ("MIN" or "MAX") determines whether this is a min or max heap
"""
class Heap:
def __init__(self, property):
self.property = property
self.keys = []
def __repr__(self):
return "{} heap containing {}".format(self.property, self.keys)
# Complexity: O(1)
@staticmethod
def get_parent_index(key_index):
return (key_index + 1) // 2 - 1
# Complexity: O(1)
@staticmethod
def get_left_child_index(key_index):
return 2 * (key_index + 1) - 1
# Complexity: O(1)
@staticmethod
def get_right_child_index(key_index):
return 2 * (key_index + 1)
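    # Index arithmetic sketch for the 0-based array layout used here,
    # taking key_index = 4 as an example:
    #   parent index      = (4 + 1) // 2 - 1 = 1
    #   left child index  = 2 * (4 + 1) - 1  = 9
    #   right child index = 2 * (4 + 1)      = 10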
"""Swap operation
Complexity: O(1)
"""
def swap(self, left_index, right_index):
print("{} <-> {}".format(self.keys[left_index], self.keys[right_index]))
temp = self.keys[left_index]
self.keys[left_index] = self.keys[right_index]
self.keys[right_index] = temp
"""Insert operation
Complexity: O(log n)
"""
def insert(self, value):
print("Inserting {}".format(value))
# Add the key
self.keys.append(value)
key_index = len(self.keys) - 1
# Swim through to restore property
while True:
if key_index == 0:
# This root cannot have parents
print("Root node, no swimming to be done")
break
# Query parent
parent_index = Heap.get_parent_index(key_index)
parent = self.keys[parent_index]
            # Check whether the heap property is violated w.r.t. the parent
            violated = value <= parent if self.property == "MIN" else value >= parent
            if violated:
print("Before swap: {}".format(self))
self.swap(key_index, parent_index)
print("After swap: {}".format(self))
key_index = parent_index # Continue swimming on the new position
else:
message = "{} >= {}" if self.property == "MIN" else "{} <= {}"
print("Property holds: " + message.format(value, parent))
# Done swimming, the property now holds
                break
print("Finished adding {}".format(value))
"""Extract operation
Complexity: O(log n)
"""
def extract(self):
if len(self.keys) == 1:
print("Extracting {}".format(self.keys[0]))
self.keys = []
elif len(self.keys) > 1:
# Replace root with last key
print("Extracting {}".format(self.keys[0]))
self.keys[0] = self.keys[len(self.keys) - 1]
self.keys.pop()
print("New root: {}".format(self.keys[0]))
# Restore heap property
self.heapify()
else:
print("Nothing to extract")
"""Heapify operation tailored to be used after extraction
Complexity: O(log n)
"""
def heapify(self):
print("Restoring heap property")
key_index = 0
# Loop until the heap property is restored
while True:
left_child_index = Heap.get_left_child_index(key_index)
right_child_index = Heap.get_right_child_index(key_index)
child_index = -1
if left_child_index < len(self.keys):
child_index = left_child_index
print("Child index: {}".format(child_index))
if right_child_index < len(self.keys):
left_child = self.keys[left_child_index]
right_child = self.keys[right_child_index]
if self.property == "MIN":
# Target child will be the smaller one
if left_child > right_child:
child_index = right_child_index
print("Child index updated: {}".format(child_index))
else:
# Target child will be the larger one
if left_child <= right_child:
child_index = right_child_index
print("Child index updated: {}".format(child_index))
key = self.keys[key_index]
child_key = self.keys[child_index]
swap = key > child_key if self.property == "MIN" else key < child_key
if swap:
# Swap elements to further restore the property
self.swap(key_index, child_index)
# Set key index for next iteration
key_index = child_index
else:
# Property holds
print("Property holds, no swap necessary")
break
else:
print("No further children")
break
print("Finished extraction")
# Main program logic
def program():
# Build a min heap
print("Constructing min heap:")
min_heap = Heap("MIN")
for i in range(8):
min_heap.insert(random.randrange(100))
print("Result: {}\n".format(min_heap))
print("Extracting from min heap:")
min_heap.extract()
print("Result: {}\n".format(min_heap))
# Build a max heap
print("Constructing max heap:")
max_heap = Heap("MAX")
for i in range(8):
max_heap.insert(random.randrange(100))
print("Result: {}\n".format(max_heap))
print("Extracting from max heap:")
max_heap.extract()
print("Result: {}\n".format(max_heap))
# Run the program
program()
| mit |
corburn/scikit-bio | skbio/alignment/__init__.py | 4 | 7301 | r"""
Alignments and Sequence collections (:mod:`skbio.alignment`)
============================================================
.. currentmodule:: skbio.alignment
This module provides functionality for working with biological sequence
collections and alignments. These can be composed of generic sequences,
nucelotide sequences, DNA sequences, and RNA sequences. By default, input is
not validated, except that sequence ids must be unique, but all
contructor methods take a validate option which checks different features of
the input based on ``SequenceCollection`` type.
Data Structures
---------------
.. autosummary::
:toctree: generated/
SequenceCollection
Alignment
TabularMSA
Optimized (i.e., production-ready) Alignment Algorithms
-------------------------------------------------------
.. autosummary::
:toctree: generated/
StripedSmithWaterman
AlignmentStructure
local_pairwise_align_ssw
Slow (i.e., educational-purposes only) Alignment Algorithms
-----------------------------------------------------------
.. autosummary::
:toctree: generated/
global_pairwise_align_nucleotide
global_pairwise_align_protein
global_pairwise_align
local_pairwise_align_nucleotide
local_pairwise_align_protein
local_pairwise_align
General functionality
---------------------
.. autosummary::
:toctree: generated/
make_identity_substitution_matrix
Exceptions
----------
.. autosummary::
:toctree: generated/
SequenceCollectionError
AlignmentError
Data Structure Examples
-----------------------
>>> from skbio import SequenceCollection, Alignment, DNA
>>> seqs = [DNA("ACC--G-GGTA..", metadata={'id':"seq1"}),
... DNA("TCC--G-GGCA..", metadata={'id':"seqs2"})]
>>> a1 = Alignment(seqs)
>>> a1
<Alignment: n=2; mean +/- std length=13.00 +/- 0.00>
>>> seqs = [DNA("ACCGGG", metadata={'id':"seq1"}),
... DNA("TCCGGGCA", metadata={'id':"seq2"})]
>>> s1 = SequenceCollection(seqs)
>>> s1
<SequenceCollection: n=2; mean +/- std length=7.00 +/- 1.00>
>>> fasta_lines = ['>seq1\n',
... 'CGATGTCGATCGATCGATCGATCAG\n',
... '>seq2\n',
... 'CATCGATCGATCGATGCATGCATGCATG\n']
>>> s1 = SequenceCollection.read(fasta_lines, constructor=DNA)
>>> s1
<SequenceCollection: n=2; mean +/- std length=26.50 +/- 1.50>
Alignment Algorithm Examples
----------------------------
Optimized Alignment Algorithm Examples
--------------------------------------
Using the convenient ``local_pairwise_align_ssw`` function:
>>> from skbio.alignment import local_pairwise_align_ssw
>>> alignment = local_pairwise_align_ssw(
... "ACTAAGGCTCTCTACCCCTCTCAGAGA",
... "ACTAAGGCTCCTAACCCCCTTTTCTCAGA"
... )
>>> print(alignment)
>query
ACTAAGGCTCTC-TACCC----CTCTCAGA
>target
ACTAAGGCTC-CTAACCCCCTTTTCTCAGA
<BLANKLINE>
Using the ``StripedSmithWaterman`` object:
>>> from skbio.alignment import StripedSmithWaterman
>>> query = StripedSmithWaterman("ACTAAGGCTCTCTACCCCTCTCAGAGA")
>>> alignment = query("AAAAAACTCTCTAAACTCACTAAGGCTCTCTACCCCTCTTCAGAGAAGTCGA")
>>> print(alignment)
ACTAAGGCTC...
ACTAAGGCTC...
Score: 49
Length: 28
Using the ``StripedSmithWaterman`` object for multiple targets in an efficient
way and finding the aligned sequence representations:
>>> from skbio.alignment import StripedSmithWaterman
>>> alignments = []
>>> target_sequences = [
... "GCTAACTAGGCTCCCTTCTACCCCTCTCAGAGA",
... "GCCCAGTAGCTTCCCAATATGAGAGCATCAATTGTAGATCGGGCC",
... "TCTATAAGATTCCGCATGCGTTACTTATAAGATGTCTCAACGG",
... "TAGAGATTAATTGCCACTGCCAAAATTCTG"
... ]
>>> query_sequence = "ACTAAGGCTCTCTACCCCTCTCAGAGA"
>>> query = StripedSmithWaterman(query_sequence)
>>> for target_sequence in target_sequences:
... alignment = query(target_sequence)
... alignments.append(alignment)
...
>>> print(alignments[0])
ACTAAGGCT-...
ACT-AGGCTC...
Score: 38
Length: 30
>>> print(alignments[0].aligned_query_sequence)
ACTAAGGCT---CTCTACCCCTCTCAGAGA
>>> print(alignments[0].aligned_target_sequence)
ACT-AGGCTCCCTTCTACCCCTCTCAGAGA
Slow Alignment Algorithm Examples
---------------------------------
scikit-bio also provides pure-Python implementations of Smith-Waterman and
Needleman-Wunsch alignment. These are much slower than the methods described
above, but serve as useful educational examples as they're simpler to
experiment with. Functions are provided for local and global alignment of
protein and nucleotide sequences. The ``global*`` and ``local*`` functions
differ in the underlying algorithm that is applied (``global*`` uses Needleman-
Wunsch while ``local*`` uses Smith-Waterman), and ``*protein`` and
``*nucleotide`` differ in their default scoring of matches, mismatches, and
gaps.
Here we locally align a pair of protein sequences using gap open penalty
of 11 and a gap extend penalty of 1 (in other words, it is much more
costly to open a new gap than extend an existing one).
>>> from skbio.alignment import local_pairwise_align_protein
>>> s1 = "HEAGAWGHEE"
>>> s2 = "PAWHEAE"
>>> r = local_pairwise_align_protein(s1, s2, 11, 1)
This returns an ``skbio.Alignment`` object. We can look at the aligned
sequences:
>>> print(str(r[0]))
AWGHE
>>> print(str(r[1]))
AW-HE
We can identify the start and end positions of each aligned sequence
as follows:
>>> r.start_end_positions()
[(4, 8), (1, 4)]
And we can view the score of the alignment using the ``score`` method:
>>> r.score()
25.0
Similarly, we can perform global alignment of nucleotide sequences, and print
the resulting alignment in FASTA format:
>>> from skbio.alignment import global_pairwise_align_nucleotide
>>> s1 = "GCGTGCCTAAGGTATGCAAG"
>>> s2 = "ACGTGCCTAGGTACGCAAG"
>>> r = global_pairwise_align_nucleotide(s1, s2)
>>> print(r)
>0
GCGTGCCTAAGGTATGCAAG
>1
ACGTGCCTA-GGTACGCAAG
<BLANKLINE>
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import TestRunner
from ._tabular_msa import TabularMSA
from ._alignment import Alignment, SequenceCollection
from ._pairwise import (
local_pairwise_align_nucleotide, local_pairwise_align_protein,
local_pairwise_align, global_pairwise_align_nucleotide,
global_pairwise_align_protein, global_pairwise_align,
make_identity_substitution_matrix, local_pairwise_align_ssw
)
from skbio.alignment._ssw_wrapper import (
StripedSmithWaterman, AlignmentStructure)
from ._exception import (SequenceCollectionError, AlignmentError)
__all__ = ['TabularMSA', 'Alignment', 'SequenceCollection',
'StripedSmithWaterman', 'AlignmentStructure',
'local_pairwise_align_ssw', 'SequenceCollectionError',
'AlignmentError', 'global_pairwise_align',
'global_pairwise_align_nucleotide', 'global_pairwise_align_protein',
'local_pairwise_align', 'local_pairwise_align_nucleotide',
'local_pairwise_align_protein', 'make_identity_substitution_matrix']
test = TestRunner(__file__).test
| bsd-3-clause |
gameduell/duell | bin/win/python2.7.9/Lib/quopri.py | 424 | 6969 | #! /usr/bin/env python
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = '='
MAXLINESIZE = 76
HEX = '0123456789ABCDEF'
EMPTYSTRING = ''
try:
from binascii import a2b_qp, b2a_qp
except ImportError:
a2b_qp = None
b2a_qp = None
def needsquoting(c, quotetabs, header):
"""Decide whether a particular character needs to be quoted.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
"""
if c in ' \t':
return quotetabs
# if header, we have to escape _ because _ is used to escape space
if c == '_':
return header
return c == ESCAPE or not (' ' <= c <= '~')
def quote(c):
"""Quote a single character."""
i = ord(c)
return ESCAPE + HEX[i//16] + HEX[i%16]
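# For example, quote('=') yields '=3D': ord('=') is 61, 61 // 16 == 3 and
# 61 % 16 == 13, which select '3' and 'D' from HEX.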
def encode(input, output, quotetabs, header = 0):
"""Read 'input', apply quoted-printable encoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
The 'header' flag indicates whether we are encoding spaces as _ as per
RFC 1522.
"""
if b2a_qp is not None:
data = input.read()
odata = b2a_qp(data, quotetabs = quotetabs, header = header)
output.write(odata)
return
def write(s, output=output, lineEnd='\n'):
# RFC 1521 requires that the line ending in a space or tab must have
# that trailing character encoded.
if s and s[-1:] in ' \t':
output.write(s[:-1] + quote(s[-1]) + lineEnd)
elif s == '.':
output.write(quote(s) + lineEnd)
else:
output.write(s + lineEnd)
prevline = None
while 1:
line = input.readline()
if not line:
break
outline = []
# Strip off any readline induced trailing newline
stripped = ''
if line[-1:] == '\n':
line = line[:-1]
stripped = '\n'
# Calculate the un-length-limited encoded line
for c in line:
if needsquoting(c, quotetabs, header):
c = quote(c)
if header and c == ' ':
outline.append('_')
else:
outline.append(c)
# First, write out the previous line
if prevline is not None:
write(prevline)
# Now see if we need any soft line breaks because of RFC-imposed
# length limitations. Then do the thisline->prevline dance.
thisline = EMPTYSTRING.join(outline)
while len(thisline) > MAXLINESIZE:
# Don't forget to include the soft line break `=' sign in the
# length calculation!
write(thisline[:MAXLINESIZE-1], lineEnd='=\n')
thisline = thisline[MAXLINESIZE-1:]
# Write out the current line
prevline = thisline
# Write out the last line, without a trailing newline
if prevline is not None:
write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs = 0, header = 0):
if b2a_qp is not None:
return b2a_qp(s, quotetabs = quotetabs, header = header)
from cStringIO import StringIO
infp = StringIO(s)
outfp = StringIO()
encode(infp, outfp, quotetabs, header)
return outfp.getvalue()
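# For example, encodestring('a=b') returns 'a=3Db': the '=' is escaped while
# the surrounding printable characters pass through unchanged.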
def decode(input, output, header = 0):
"""Read 'input', apply quoted-printable decoding, and write to 'output'.
'input' and 'output' are files with readline() and write() methods.
If 'header' is true, decode underscore as space (per RFC 1522)."""
if a2b_qp is not None:
data = input.read()
odata = a2b_qp(data, header = header)
output.write(odata)
return
new = ''
while 1:
line = input.readline()
if not line: break
i, n = 0, len(line)
if n > 0 and line[n-1] == '\n':
partial = 0; n = n-1
# Strip trailing whitespace
while n > 0 and line[n-1] in " \t\r":
n = n-1
else:
partial = 1
while i < n:
c = line[i]
if c == '_' and header:
new = new + ' '; i = i+1
elif c != ESCAPE:
new = new + c; i = i+1
elif i+1 == n and not partial:
partial = 1; break
elif i+1 < n and line[i+1] == ESCAPE:
new = new + ESCAPE; i = i+2
elif i+2 < n and ishex(line[i+1]) and ishex(line[i+2]):
new = new + chr(unhex(line[i+1:i+3])); i = i+3
else: # Bad escape sequence -- leave it in
new = new + c; i = i+1
if not partial:
output.write(new + '\n')
new = ''
if new:
output.write(new)
def decodestring(s, header = 0):
if a2b_qp is not None:
return a2b_qp(s, header = header)
from cStringIO import StringIO
infp = StringIO(s)
outfp = StringIO()
decode(infp, outfp, header = header)
return outfp.getvalue()
# Other helper functions
def ishex(c):
"""Return true if the character 'c' is a hexadecimal digit."""
return '0' <= c <= '9' or 'a' <= c <= 'f' or 'A' <= c <= 'F'
def unhex(s):
"""Get the integer value of a hexadecimal number."""
bits = 0
for c in s:
if '0' <= c <= '9':
i = ord('0')
elif 'a' <= c <= 'f':
i = ord('a')-10
elif 'A' <= c <= 'F':
i = ord('A')-10
else:
break
bits = bits*16 + (ord(c) - i)
return bits
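# For example, unhex('3D') computes 3*16 + 13 == 61, the code point of '='.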
def main():
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'td')
except getopt.error, msg:
sys.stdout = sys.stderr
print msg
print "usage: quopri [-t | -d] [file] ..."
print "-t: quote tabs"
print "-d: decode; default encode"
sys.exit(2)
deco = 0
tabs = 0
for o, a in opts:
if o == '-t': tabs = 1
if o == '-d': deco = 1
if tabs and deco:
sys.stdout = sys.stderr
print "-t and -d are mutually exclusive"
sys.exit(2)
if not args: args = ['-']
sts = 0
for file in args:
if file == '-':
fp = sys.stdin
else:
try:
fp = open(file)
except IOError, msg:
sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
sts = 1
continue
if deco:
decode(fp, sys.stdout)
else:
encode(fp, sys.stdout, tabs)
if fp is not sys.stdin:
fp.close()
if sts:
sys.exit(sts)
if __name__ == '__main__':
main()
| bsd-2-clause |
yamamatsu2/pimouse_ros | scripts/lightsensors.py | 1 | 1258 | #!/usr/bin/env python
#encoding: utf8
import sys, rospy
from pimouse_ros.msg import LightSensorValues
def get_freq():
f = rospy.get_param('lightsensors_freq',10)
try:
if f <= 0.0:
raise Exception()
except:
rospy.logerr("value error: ligtsensors_freq")
sys.exit(1)
return f
if __name__ == '__main__':
devfile = '/dev/rtlightsensor0'
rospy.init_node('lightsensors')
pub = rospy.Publisher('lightsensors', LightSensorValues, queue_size=1)
freq = get_freq()
rate = rospy.Rate(freq)
while not rospy.is_shutdown():
try:
with open(devfile,'r') as f:
data = f.readline().split()
data = [ int(e) for e in data]
d = LightSensorValues()
d.right_forward = data[0]
d.right_side = data[1]
d.left_side = data[2]
d.left_forward = data[3]
d.sum_all = sum(data)
d.sum_forward = data[0] + data[3]
pub.publish(d)
except:
rospy.logerr("cannot open " + devfile)
f = get_freq()
if f != freq:
freq = f
rate = rospy.Rate(freq)
rate.sleep()
| gpl-3.0 |
mchristopher/PokemonGo-DesktopMap | app/pylibs/win32/Cryptodome/Hash/SHA256.py | 2 | 6305 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""SHA-256 cryptographic hash algorithm.
SHA-256 belongs to the SHA-2_ family of cryptographic hashes.
It produces the 256-bit digest of a message.
>>> from Cryptodome.Hash import SHA256
>>>
>>> h = SHA256.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
*SHA* stands for Secure Hash Algorithm.
.. _SHA-2: http://csrc.nist.gov/publications/fips/fips180-2/fips180-4.pdf
"""
from Cryptodome.Util.py3compat import *
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
expect_byte_string)
_raw_sha256_lib = load_pycryptodome_raw_lib("Cryptodome.Hash._SHA256",
"""
int SHA256_init(void **shaState);
int SHA256_destroy(void *shaState);
int SHA256_update(void *hs,
const uint8_t *buf,
size_t len);
int SHA256_digest(const void *shaState,
uint8_t digest[32]);
int SHA256_copy(const void *src, void *dst);
""")
class SHA256Hash(object):
"""Class that implements a SHA-256 hash
"""
#: The size of the resulting hash in bytes.
digest_size = 32
#: The internal block size of the hash algorithm in bytes.
block_size = 64
#: ASN.1 Object ID
oid = "2.16.840.1.101.3.4.2.1"
def __init__(self, data=None):
state = VoidPointer()
result = _raw_sha256_lib.SHA256_init(state.address_of())
if result:
raise ValueError("Error %d while instantiating SHA256"
% result)
self._state = SmartPointer(state.get(),
_raw_sha256_lib.SHA256_destroy)
if data:
self.update(data)
def update(self, data):
"""Continue hashing of a message by consuming the next chunk of data.
Repeated calls are equivalent to a single call with the concatenation
of all the arguments. In other words:
>>> m.update(a); m.update(b)
is equivalent to:
>>> m.update(a+b)
:Parameters:
data : byte string
The next chunk of the message being hashed.
"""
expect_byte_string(data)
result = _raw_sha256_lib.SHA256_update(self._state.get(),
data,
c_size_t(len(data)))
if result:
raise ValueError("Error %d while instantiating SHA256"
% result)
def digest(self):
"""Return the **binary** (non-printable) digest of the message that has been hashed so far.
This method does not change the state of the hash object.
You can continue updating the object after calling this function.
:Return: A byte string of `digest_size` bytes. It may contain non-ASCII
characters, including null bytes.
"""
bfr = create_string_buffer(self.digest_size)
result = _raw_sha256_lib.SHA256_digest(self._state.get(),
bfr)
if result:
raise ValueError("Error %d while instantiating SHA256"
% result)
return get_raw_buffer(bfr)
def hexdigest(self):
"""Return the **printable** digest of the message that has been hashed so far.
This method does not change the state of the hash object.
:Return: A string of 2* `digest_size` characters. It contains only
hexadecimal ASCII digits.
"""
return "".join(["%02x" % bord(x) for x in self.digest()])
def copy(self):
"""Return a copy ("clone") of the hash object.
The copy will have the same internal state as the original hash
object.
This can be used to efficiently compute the digests of strings that
share a common initial substring.
:Return: A hash object of the same type
"""
clone = SHA256Hash()
result = _raw_sha256_lib.SHA256_copy(self._state.get(),
clone._state.get())
if result:
raise ValueError("Error %d while copying SHA256" % result)
return clone
def new(self, data=None):
return SHA256Hash(data)
def new(data=None):
"""Return a fresh instance of the hash object.
:Parameters:
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `SHA256Hash.update()`.
Optional.
:Return: A `SHA256Hash` object
"""
return SHA256Hash().new(data)
#: The size of the resulting hash in bytes.
digest_size = SHA256Hash.digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = SHA256Hash.block_size
| mit |
authman/Python201609 | pairol_alex/Assignments/pylotNinja/controllers/Ninja.py | 1 | 1500 | from system.core.controller import*
import random
import datetime
from time import strftime
class Ninja(Controller):
def __init__(self, action):
super(Ninja, self).__init__(action)
def index(self):
try:
session['gold']
except:
session['gold'] = 0
try:
session['activities']
except:
session['activities'] = []
return self.load_view('ninja.html')
def clear(self):
session.clear()
        return redirect('/')
def process(self):
action = request.form["action"]
randomNumber = random.random()
print randomNumber
if action == "farm":
earn = int(randomNumber*10)+10
elif action == "cave":
earn = int(randomNumber*5)+5
elif action == "house":
earn = int(randomNumber*3)+3
elif action == "casino":
earn = int(randomNumber*100)-50
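        # Payout ranges implied by the arithmetic above (random() lies in
        # [0, 1)): farm 10-19, cave 5-9, house 3-5, casino -50 to +49.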
session['gold'] += earn
timeNow = datetime.datetime.now().strftime('%Y/%m/%d')
        if earn >= 0:
            newAction = {'status' : 'earn',
                         'action' : "Earned {} gold from {} ({})".format(earn, action, timeNow)}
        else:
            newAction = {'status' : 'lost',
                         'action' : "Entered Casino and lost {} gold ({})".format(-earn, timeNow)}
print newAction
session["activities"].append(newAction)
print session["activities"]
return redirect('/')
| mit |
surhudm/scipy | benchmarks/benchmarks/sparse_linalg_solve.py | 30 | 2163 | """
Check the speed of the conjugate gradient solver.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_equal
try:
from scipy import linalg, sparse
from scipy.sparse.linalg import cg, minres, spsolve
except ImportError:
pass
try:
from scipy.sparse.linalg import lgmres
except ImportError:
pass
from .common import Benchmark
def _create_sparse_poisson1d(n):
# Make Gilbert Strang's favorite matrix
# http://www-math.mit.edu/~gs/PIX/cupcakematrix.jpg
P1d = sparse.diags([[-1]*(n-1), [2]*n, [-1]*(n-1)], [-1, 0, 1])
assert_equal(P1d.shape, (n, n))
return P1d
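# For n = 3 the helper above returns the tridiagonal matrix
#   [[ 2, -1,  0],
#    [-1,  2, -1],
#    [ 0, -1,  2]]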
def _create_sparse_poisson2d(n):
P1d = _create_sparse_poisson1d(n)
P2d = sparse.kronsum(P1d, P1d)
assert_equal(P2d.shape, (n*n, n*n))
return P2d.tocsr()
class Bench(Benchmark):
params = [
[4, 6, 10, 16, 25, 40, 64, 100],
['dense', 'spsolve', 'cg', 'minres', 'lgmres']
]
param_names = ['(n,n)', 'solver']
def setup(self, n, solver):
if solver == 'dense' and n >= 25:
raise NotImplementedError()
self.b = np.ones(n*n)
self.P_sparse = _create_sparse_poisson2d(n)
if solver == 'dense':
self.P_dense = self.P_sparse.A
def time_solve(self, n, solver):
if solver == 'dense':
linalg.solve(self.P_dense, self.b)
elif solver == 'cg':
cg(self.P_sparse, self.b)
elif solver == 'minres':
minres(self.P_sparse, self.b)
elif solver == 'lgmres':
lgmres(self.P_sparse, self.b)
elif solver == 'spsolve':
spsolve(self.P_sparse, self.b)
else:
raise ValueError('Unknown solver: %r' % solver)
class Lgmres(Benchmark):
params = [
[10, 50, 100, 1000, 10000],
[10, 30, 60, 90, 180],
]
param_names = ['n', 'm']
def setup(self, n, m):
np.random.seed(1234)
self.A = sparse.eye(n, n) + sparse.rand(n, n, density=0.01)
self.b = np.ones(n)
def time_inner(self, n, m):
lgmres(self.A, self.b, inner_m=m, maxiter=1)
| bsd-3-clause |
dbrgn/fahrplan | fahrplan/tests/test.py | 1 | 9255 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import, unicode_literals
import sys
from datetime import datetime
from subprocess import Popen, PIPE
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
import unittest2 as unittest
else:
import unittest
from .. import meta
from .. import parser
BASE_COMMAND = 'python -m fahrplan.main'
try:
ENCODING = sys.stdout.encoding or 'utf-8'
except AttributeError:
ENCODING = 'utf-8'
# Run command
class CommandOutput(object):
def __init__(self, stdout, stderr, code):
self.std_err = stderr
self.std_out = stdout
self.status_code = code
def run_command(command):
p = Popen(command, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
return CommandOutput(stdout.decode(ENCODING), stderr.decode(ENCODING), p.returncode)
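# For example, run_command('echo hi') should yield a CommandOutput with
# std_out == 'hi\n' and status_code == 0 on a POSIX shell.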
class TestBasicArgumentHandling(unittest.TestCase):
def testRequiredArgumentsMissing(self):
r = run_command('{0} von bern'.format(BASE_COMMAND))
self.assertEqual('Error: "from" and "to" arguments must be present!\n', r.std_err)
def testVersionInfo(self):
args = ['-v', '--version', '-d -i -v']
for arg in args:
r = run_command('{0} {1}'.format(BASE_COMMAND, arg))
self.assertEqual('%s %s\n' % (meta.title, meta.version), r.std_out)
def testHelp(self):
args = ['-h', '--help', '-d -i --help']
for arg in args:
r = run_command('{0} {1}'.format(BASE_COMMAND, arg))
self.assertTrue(meta.description in r.std_out)
self.assertTrue('usage:' in r.std_out)
self.assertTrue('optional arguments:' in r.std_out)
self.assertTrue('positional arguments:' in r.std_out)
self.assertTrue('Examples:' in r.std_out)
class TestInputParsing(unittest.TestCase):
valid_expected_result = {
'arrival': '19:00',
'departure': '18:30',
'from': 'Zürich',
'to': 'Locarno',
'via': 'Genève',
}
def testEmptyArguments(self):
tokens = []
data, language = parser.parse_input(tokens)
self.assertEqual({}, data)
self.assertIsNone(language)
def testOneValidArgument(self):
tokens = 'from'.split()
data, language = parser.parse_input(tokens)
self.assertEqual({}, data)
self.assertIsNone(language)
def testOneInvalidArgument(self):
tokens = 'foobar'.split()
data, language = parser.parse_input(tokens)
self.assertEqual({}, data)
self.assertIsNone(language)
def testValidArgumentsEn(self):
tokens = 'from Zürich to Locarno via Genève departure 18:30 arrival 19:00'.split()
data, language = parser._process_tokens(tokens, sloppy_validation=True)
self.assertEqual(self.valid_expected_result, data)
self.assertEqual('en', language)
def testValidArgumentsDe(self):
tokens = 'von Zürich nach Locarno via Genève ab 18:30 an 19:00'.split()
data, language = parser._process_tokens(tokens, sloppy_validation=True)
self.assertEqual(self.valid_expected_result, data)
self.assertEqual('de', language)
def testValidArgumentsFr(self):
tokens = 'de Zürich à Locarno via Genève départ 18:30 arrivée 19:00'.split()
data, language = parser._process_tokens(tokens, sloppy_validation=True)
self.assertEqual(self.valid_expected_result, data)
self.assertEqual('fr', language)
def testTwoArguments(self):
tokens = 'Zürich Basel'.split()
data, language = parser.parse_input(tokens)
self.assertEqual({'from': 'Zürich', 'to': 'Basel'}, data)
self.assertEqual('en', language)
def testNotEnoughArgument(self):
tokens = 'from basel via bern'.split()
self.assertRaises(ValueError, parser.parse_input, tokens)
def testBasicDepartureTime(self):
tokens = 'von basel nach bern ab 1945'.split()
expected = {'from': 'basel', 'time': '19:45', 'to': 'bern'}
self.assertEqual(expected, parser.parse_input(tokens)[0])
def testBasicArrivalTime(self):
tokens = 'von basel nach bern an 18:00'.split()
expected = {'from': 'basel', 'isArrivalTime': 1, 'time': '18:00', 'to': 'bern'}
self.assertEqual(expected, parser.parse_input(tokens)[0])
def testImmediateTimes(self):
now = datetime.now().strftime('%H:%M')
queries = [
'von basel nach bern ab jetzt'.split(),
'von basel nach bern ab sofort'.split(),
'from basel to bern departure now'.split(),
'from basel to bern departure right now'.split(),
'from basel to bern departure immediately'.split(),
'de basel à bern départ maitenant'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual(now, data['time'])
def testNoonTimes(self):
queries = [
'von basel nach bern ab mittag'.split(),
'from basel to bern departure noon'.split(),
'de basel à bern départ midi'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual('12:00', data['time'])
def testMidnightTimes(self):
queries = [
'von basel nach bern ab mitternacht'.split(),
'from basel to bern departure midnight'.split(),
'de basel à bern départ minuit'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual('23:59', data['time'])
def testAtTimes(self):
queries = [
'von basel nach bern ab am mittag'.split(),
'von basel nach bern ab um 12:00'.split(),
'from basel to bern departure at noon'.split(),
'from basel to bern departure at 12:00'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual('12:00', data['time'])
def testDates(self):
year = datetime.now().year
queries = [
'von basel nach bern ab 22/10/{} 13:00'.format(year).split(),
'von basel nach bern ab um 22/10 13:00'.split(),
'from basel to bern departure 22/10 13:00'.split(),
'from basel to bern departure 22/10 13:00'.split(),
]
for tokens in queries:
data, _ = parser.parse_input(tokens)
self.assertEqual('13:00', data['time'])
self.assertEqual('{}/10/22'.format(year), data['date'])
class TestBasicQuery(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup method that is only run once."""
cmd = '{0} von basel nach zürich ab 07:00'.format(BASE_COMMAND)
cls.r = run_command(cmd)
cls.rows = cls.r.std_out.split('\n')
def returnStatus(self):
"""The command should return the status code 0."""
self.assertEqual(0, self.r.status_code)
def testRowCount(self):
"""A normal output table should have 16 rows."""
self.assertEqual(16, len(self.rows))
def testHeadline(self):
"""Test the headline items."""
headline_items = ['Station', 'Platform', 'Date', 'Time',
'Duration', 'Chg.', 'With', 'Occupancy']
for item in headline_items:
self.assertIn(item, self.rows[1])
def testStationNames(self):
"""Station names should be "Basel SBB" and "Zürich HB"."""
self.assertTrue("Basel SBB" in self.rows[3])
self.assertTrue("Zürich HB" in self.rows[4])
class TestLanguages(unittest.TestCase):
def testBasicQuery(self):
"""
Test a query in three languages and assert that the output of all
three queries is equal.
"""
args = ['von bern nach basel via zürich ab 15:00',
'from bern to basel via zürich departure 15:00',
'de bern à basel via zürich départ 15:00']
jobs = [run_command('{0} {1}'.format(BASE_COMMAND, arg)) for arg in args]
statuscodes = [job.status_code for job in jobs]
self.assertEqual([0, 0, 0], statuscodes)
stdout_values = [job.std_out for job in jobs]
self.assertTrue(stdout_values[1:] == stdout_values[:-1])
class RegressionTests(unittest.TestCase):
def testIss11(self):
"""
Github issue #11:
Don't allow both departure and arrival time.
"""
args = 'von bern nach basel ab 15:00 an 16:00'
query = run_command('{0} {1}'.format(BASE_COMMAND, args))
self.assertEqual('Error: You can\'t specify both departure *and* arrival time.\n',
query.std_err)
def testIss13(self):
"""
Github issue #13:
Station not found: ValueError: max() arg is an empty sequence.
"""
args = 'von zuerich manegg nach nach stadelhofen'
query = run_command('{0} {1}'.format(BASE_COMMAND, args))
self.assertEqual(0, query.status_code, 'Program terminated with statuscode != 0')
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
EducationForDevelopment/webapp | lib/flask/__init__.py | 345 | 1672 | # -*- coding: utf-8 -*-
"""
flask
~~~~~
A microframework based on Werkzeug. It's extensively documented
and follows best practice patterns.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
__version__ = '0.10'
# utilities we import from Werkzeug and Jinja2 that are unused
# in the module but are exported as public interface.
from werkzeug.exceptions import abort
from werkzeug.utils import redirect
from jinja2 import Markup, escape
from .app import Flask, Request, Response
from .config import Config
from .helpers import url_for, flash, send_file, send_from_directory, \
get_flashed_messages, get_template_attribute, make_response, safe_join, \
stream_with_context
from .globals import current_app, g, request, session, _request_ctx_stack, \
_app_ctx_stack
from .ctx import has_request_context, has_app_context, \
after_this_request, copy_current_request_context
from .module import Module
from .blueprints import Blueprint
from .templating import render_template, render_template_string
# the signals
from .signals import signals_available, template_rendered, request_started, \
request_finished, got_request_exception, request_tearing_down, \
appcontext_tearing_down, appcontext_pushed, \
appcontext_popped, message_flashed
# We're not exposing the actual json module but a convenient wrapper around
# it.
from . import json
# This was the only thing that flask used to export at one point and it had
# a more generic name.
jsonify = json.jsonify
# backwards compat, goes away in 1.0
from .sessions import SecureCookieSession as Session
json_available = True
| mit |
sshleifer/object_detection_kitti | object_detection/core/matcher_test.py | 21 | 7101 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.matcher."""
import numpy as np
import tensorflow as tf
from object_detection.core import matcher
class AnchorMatcherTest(tf.test.TestCase):
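
  # Convention exercised by the expectations below: entries >= 0 in
  # match_results mark matched columns (the value is the matched row index),
  # -1 marks an unmatched column and -2 an ignored column.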
def test_get_correct_matched_columnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [0, 1, 3, 5]
matched_column_indices = match.matched_column_indices()
self.assertEquals(matched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_column_indices = sess.run(matched_column_indices)
self.assertAllEqual(matched_column_indices, expected_column_indices)
def test_get_correct_counts(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
exp_num_matched_columns = 4
exp_num_unmatched_columns = 2
exp_num_ignored_columns = 1
num_matched_columns = match.num_matched_columns()
num_unmatched_columns = match.num_unmatched_columns()
num_ignored_columns = match.num_ignored_columns()
self.assertEquals(num_matched_columns.dtype, tf.int32)
self.assertEquals(num_unmatched_columns.dtype, tf.int32)
self.assertEquals(num_ignored_columns.dtype, tf.int32)
with self.test_session() as sess:
(num_matched_columns_out, num_unmatched_columns_out,
num_ignored_columns_out) = sess.run(
[num_matched_columns, num_unmatched_columns, num_ignored_columns])
self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns)
self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns)
self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns)
def testGetCorrectUnmatchedColumnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4]
unmatched_column_indices = match.unmatched_column_indices()
self.assertEquals(unmatched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_column_indices = sess.run(unmatched_column_indices)
self.assertAllEqual(unmatched_column_indices, expected_column_indices)
def testGetCorrectMatchedRowIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_row_indices = [3, 1, 0, 5]
matched_row_indices = match.matched_row_indices()
self.assertEquals(matched_row_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_row_inds = sess.run(matched_row_indices)
self.assertAllEqual(matched_row_inds, expected_row_indices)
def test_get_correct_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [6]
ignored_column_indices = match.ignored_column_indices()
self.assertEquals(ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
ignored_column_indices = sess.run(ignored_column_indices)
self.assertAllEqual(ignored_column_indices, expected_column_indices)
def test_get_correct_matched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [True, True, False, True, False, True, False]
matched_column_indicator = match.matched_column_indicator()
self.assertEquals(matched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
matched_column_indicator = sess.run(matched_column_indicator)
self.assertAllEqual(matched_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, True, False, True, False, False]
unmatched_column_indicator = match.unmatched_column_indicator()
self.assertEquals(unmatched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
unmatched_column_indicator = sess.run(unmatched_column_indicator)
self.assertAllEqual(unmatched_column_indicator, expected_column_indicator)
def test_get_correct_ignored_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, False, False, False, False, True]
ignored_column_indicator = match.ignored_column_indicator()
self.assertEquals(ignored_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
ignored_column_indicator = sess.run(ignored_column_indicator)
self.assertAllEqual(ignored_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4, 6]
unmatched_ignored_column_indices = (match.
unmatched_or_ignored_column_indices())
self.assertEquals(unmatched_ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_ignored_column_indices = sess.run(
unmatched_ignored_column_indices)
self.assertAllEqual(unmatched_ignored_column_indices,
expected_column_indices)
def test_all_columns_accounted_for(self):
# Note: deliberately setting to small number so not always
# all possibilities appear (matched, unmatched, ignored)
num_matches = 10
match_results = tf.random_uniform(
[num_matches], minval=-2, maxval=5, dtype=tf.int32)
match = matcher.Match(match_results)
matched_column_indices = match.matched_column_indices()
unmatched_column_indices = match.unmatched_column_indices()
ignored_column_indices = match.ignored_column_indices()
with self.test_session() as sess:
matched, unmatched, ignored = sess.run([
matched_column_indices, unmatched_column_indices,
ignored_column_indices
])
all_indices = np.hstack((matched, unmatched, ignored))
all_indices_sorted = np.sort(all_indices)
self.assertAllEqual(all_indices_sorted,
np.arange(num_matches, dtype=np.int32))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
caidongyun/pylearn2 | pylearn2/datasets/avicenna.py | 37 | 1062 | """
.. todo::
WRITEME
"""
from pylearn2.datasets import utlc
import numpy as N
class Avicenna(object):
"""
.. todo::
WRITEME
Parameters
----------
which_set : WRITEME
standardize : WRITEME
"""
def __init__(self, which_set, standardize):
train, valid, test = utlc.load_ndarray_dataset('avicenna')
if which_set == 'train':
self.X = train
elif which_set == 'valid':
self.X = valid
elif which_set == 'test':
self.X = test
else:
assert False
if standardize:
union = N.concatenate([train, valid, test], axis=0)
# perform mean and std in float64 to avoid losing
# too much numerical precision
self.X -= union.mean(axis=0, dtype='float64')
std = union.std(axis=0, dtype='float64')
std[std < 1e-3] = 1e-3
self.X /= std
def get_design_matrix(self):
"""
        Returns the design matrix, with one example per row.
"""
return self.X
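# A minimal usage sketch (illustrative; assumes the UTLC 'avicenna' ndarray
# files are available to pylearn2's dataset loader):
#   >>> from pylearn2.datasets.avicenna import Avicenna
#   >>> train = Avicenna(which_set='train', standardize=True)
#   >>> X = train.get_design_matrix()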
| bsd-3-clause |
iem-projects/WILMAmix | WILMA/gui/Translator.py | 1 | 1678 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2013, IOhannes m zmölnig, IEM
# This file is part of WILMix
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WILMix. If not, see <http://www.gnu.org/licenses/>.
import logging as logging_
logging = logging_.getLogger('WILMA.gui.Translator')
import locale, os
from PySide.QtCore import QTranslator, QLibraryInfo
class Translator:
def __init__(self, oApp):
try:
# Install the appropriate editor translation file
sLocale = locale.getdefaultlocale()[0]
oTranslator = QTranslator()
path=os.path.join('i18n', sLocale)
if oTranslator.load(path):
oApp.installTranslator(oTranslator)
logging.debug( "translator: OK")
## # Install the appropriate Qt translation file
## oTranslatorQt = QTranslator()
## print 'qt_' + sLocale, QLibraryInfo.location(QLibraryInfo.TranslationsPath)
## if oTranslatorQt.load('qt_' + sLocale, QLibraryInfo.location(QLibraryInfo.TranslationsPath)):
## oApp.installTranslator(oTranslatorQt)
        except Exception:
            logging.exception( "translator")
| gpl-2.0 |
dstufft/sqlalchemy | test/dialect/mysql/test_query.py | 12 | 6313 | # coding: utf-8
from sqlalchemy.testing import eq_, is_
from sqlalchemy import *
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import testing
class IdiosyncrasyTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'mysql'
__backend__ = True
@testing.emits_warning()
def test_is_boolean_symbols_despite_no_native(self):
is_(
testing.db.scalar(select([cast(true().is_(true()), Boolean)])),
True
)
is_(
testing.db.scalar(select([cast(true().isnot(true()), Boolean)])),
False
)
is_(
testing.db.scalar(select([cast(false().is_(false()), Boolean)])),
True
)
class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = 'mysql'
__backend__ = True
@classmethod
def setup_class(cls):
global metadata, cattable, matchtable
metadata = MetaData(testing.db)
cattable = Table('cattable', metadata,
Column('id', Integer, primary_key=True),
Column('description', String(50)),
mysql_engine='MyISAM'
)
matchtable = Table('matchtable', metadata,
Column('id', Integer, primary_key=True),
Column('title', String(200)),
Column('category_id', Integer, ForeignKey('cattable.id')),
mysql_engine='MyISAM'
)
metadata.create_all()
cattable.insert().execute([
{'id': 1, 'description': 'Python'},
{'id': 2, 'description': 'Ruby'},
])
matchtable.insert().execute([
{'id': 1,
'title': 'Agile Web Development with Ruby On Rails',
'category_id': 2},
{'id': 2,
'title': 'Dive Into Python',
'category_id': 1},
{'id': 3,
'title': "Programming Matz's Ruby",
'category_id': 2},
{'id': 4,
'title': 'The Definitive Guide to Django',
'category_id': 1},
{'id': 5,
'title': 'Python in a Nutshell',
'category_id': 1}
])
@classmethod
def teardown_class(cls):
metadata.drop_all()
@testing.fails_on('mysql+mysqlconnector', 'uses pyformat')
def test_expression_format(self):
format = testing.db.dialect.paramstyle == 'format' and '%s' or '?'
self.assert_compile(
matchtable.c.title.match('somstr'),
"MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)" % format)
@testing.fails_on('mysql+mysqldb', 'uses format')
@testing.fails_on('mysql+pymysql', 'uses format')
@testing.fails_on('mysql+cymysql', 'uses format')
@testing.fails_on('mysql+oursql', 'uses format')
@testing.fails_on('mysql+pyodbc', 'uses format')
@testing.fails_on('mysql+zxjdbc', 'uses format')
def test_expression_pyformat(self):
format = '%(title_1)s'
self.assert_compile(
matchtable.c.title.match('somstr'),
"MATCH (matchtable.title) AGAINST (%s IN BOOLEAN MODE)" % format)
def test_simple_match(self):
results = (matchtable.select().
where(matchtable.c.title.match('python')).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([2, 5], [r.id for r in results])
def test_not_match(self):
results = (matchtable.select().
where(~matchtable.c.title.match('python')).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([1, 3, 4], [r.id for r in results])
def test_simple_match_with_apostrophe(self):
results = (matchtable.select().
where(matchtable.c.title.match("Matz's")).
execute().
fetchall())
eq_([3], [r.id for r in results])
def test_return_value(self):
# test [ticket:3263]
result = testing.db.execute(
select([
matchtable.c.title.match('Agile Ruby Programming').label('ruby'),
matchtable.c.title.match('Dive Python').label('python'),
matchtable.c.title
]).order_by(matchtable.c.id)
).fetchall()
eq_(
result,
[
(2.0, 0.0, 'Agile Web Development with Ruby On Rails'),
(0.0, 2.0, 'Dive Into Python'),
(2.0, 0.0, "Programming Matz's Ruby"),
(0.0, 0.0, 'The Definitive Guide to Django'),
(0.0, 1.0, 'Python in a Nutshell')
]
)
def test_or_match(self):
results1 = (matchtable.select().
where(or_(matchtable.c.title.match('nutshell'),
matchtable.c.title.match('ruby'))).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([1, 3, 5], [r.id for r in results1])
results2 = (matchtable.select().
where(matchtable.c.title.match('nutshell ruby')).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([1, 3, 5], [r.id for r in results2])
def test_and_match(self):
results1 = (matchtable.select().
where(and_(matchtable.c.title.match('python'),
matchtable.c.title.match('nutshell'))).
execute().
fetchall())
eq_([5], [r.id for r in results1])
results2 = (matchtable.select().
where(matchtable.c.title.match('+python +nutshell')).
execute().
fetchall())
eq_([5], [r.id for r in results2])
def test_match_across_joins(self):
results = (matchtable.select().
where(and_(cattable.c.id==matchtable.c.category_id,
or_(cattable.c.description.match('Ruby'),
matchtable.c.title.match('nutshell')))).
order_by(matchtable.c.id).
execute().
fetchall())
eq_([1, 3, 5], [r.id for r in results])
| mit |
jsilhan/dnf-plugins-core | plugins/copr.py | 1 | 16500 | # supplies the 'copr' command.
#
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import print_function
from dnf.pycomp import PY3
from subprocess import call
from dnfpluginscore import _, logger
from dnf.i18n import ucd
import dnf
import glob
import json
import os
import platform
import shutil
import stat
import rpm
PLUGIN_CONF = 'copr'
YES = set([_('yes'), _('y')])
NO = set([_('no'), _('n'), ''])
# compatibility with Py2 and Py3 - rename raw_input() to input() on Py2
try:
input = raw_input
except NameError:
pass
if PY3:
from configparser import ConfigParser
else:
from ConfigParser import ConfigParser
@dnf.plugin.register_command
class CoprCommand(dnf.cli.Command):
""" Copr plugin for DNF """
chroot_config = None
copr_url = "https://copr.fedorainfracloud.org"
aliases = ("copr",)
summary = _("Interact with Copr repositories.")
usage = _("""
enable name/project [chroot]
disable name/project
remove name/project
list name
search project
Examples:
copr enable rhscl/perl516 epel-6-x86_64
copr enable ignatenkobrain/ocltoys
copr disable rhscl/perl516
copr remove rhscl/perl516
copr list ignatenkobrain
copr search tests
""")
@staticmethod
def set_argparser(parser):
parser.add_argument('subcommand', nargs=1,
choices=['help', 'enable', 'disable',
'remove', 'list', 'search'])
parser.add_argument('arg', nargs='*')
def configure(self):
raw_config = ConfigParser()
filepath = os.path.join(os.path.expanduser("~"), ".config", "copr")
        if raw_config.read(filepath):
            if PY3:
                self.copr_url = raw_config["copr-cli"].get("copr_url", None)
            else:
                self.copr_url = raw_config.get("copr-cli", "copr_url", None)
            if not self.copr_url:
                # a config file without copr_url should not leave us with None
                self.copr_url = "https://copr.fedorainfracloud.org"
if self.copr_url != "https://copr.fedorainfracloud.org":
print(_("Warning: we are using non-standard Copr URL '{}'.").format(self.copr_url))
# Useful for forcing a distribution
copr_plugin_config = ConfigParser()
config_file = None
for path in self.base.conf.pluginconfpath:
test_config_file = '{}/{}.conf'.format(path, PLUGIN_CONF)
if os.path.isfile(test_config_file):
config_file = test_config_file
if config_file is not None:
copr_plugin_config.read(config_file)
if copr_plugin_config.has_option('main', 'distribution') and copr_plugin_config.has_option('main', 'releasever'):
distribution = copr_plugin_config.get('main', 'distribution')
releasever = copr_plugin_config.get('main', 'releasever')
self.chroot_config = [distribution, releasever]
else:
self.chroot_config = [False, False]
def run(self):
subcommand = self.opts.subcommand[0]
if subcommand == "help":
self.cli.optparser.print_help(self)
return 0
try:
project_name = self.opts.arg[0]
except (ValueError, IndexError):
logger.critical(
_('Error: ') +
_('exactly two additional parameters to '
'copr command are required'))
self.cli.optparser.print_help(self)
raise dnf.cli.CliError(
_('exactly two additional parameters to '
'copr command are required'))
try:
chroot = self.opts.arg[1]
except IndexError:
chroot = self._guess_chroot(self.chroot_config)
# commands without defined copr_username/copr_projectname
if subcommand == "list":
self._list_user_projects(project_name)
return
if subcommand == "search":
self._search(project_name)
return
try:
copr_username, copr_projectname = project_name.split("/")
except ValueError:
logger.critical(
_('Error: ') +
_('use format `copr_username/copr_projectname` '
'to reference copr project'))
raise dnf.cli.CliError(_('bad copr project format'))
repo_filename = "{}/_copr_{}-{}.repo" \
.format(self.base.conf.get_reposdir, copr_username, copr_projectname)
if subcommand == "enable":
self._need_root()
self._ask_user("""
You are about to enable a Copr repository. Please note that this
repository is not part of the main distribution, and quality may vary.
The Fedora Project does not exercise any power over the contents of
this repository beyond the rules outlined in the Copr FAQ at
<https://fedorahosted.org/copr/wiki/UserDocs#WhatIcanbuildinCopr>, and
packages are not held to any quality or security level.
Please do not file bug reports about these packages in Fedora
Bugzilla. In case of problems, contact the owner of this repository.
Do you want to continue? [y/N]: """)
self._download_repo(project_name, repo_filename, chroot)
logger.info(_("Repository successfully enabled."))
elif subcommand == "disable":
self._need_root()
self._disable_repo(copr_username, copr_projectname)
logger.info(_("Repository successfully disabled."))
elif subcommand == "remove":
self._need_root()
self._remove_repo(repo_filename)
logger.info(_("Repository successfully removed."))
else:
raise dnf.exceptions.Error(
_('Unknown subcommand {}.').format(subcommand))
def _list_user_projects(self, user_name):
# http://copr.fedorainfracloud.org/api/coprs/ignatenkobrain/
api_path = "/api/coprs/{}/".format(user_name)
res = self.base.urlopen(self.copr_url + api_path, mode='w+')
try:
json_parse = json.loads(res.read())
except ValueError:
raise dnf.exceptions.Error(
_("Can't parse repositories for username '{}'.")
.format(user_name))
self._check_json_output(json_parse)
section_text = _("List of {} coprs").format(user_name)
self._print_match_section(section_text)
        for repo in json_parse["repos"]:
            msg = "{0}/{1} : ".format(user_name, repo["name"])
            desc = repo["description"]
            if not desc:
                desc = _("No description given")
            msg = self.base.output.fmtKeyValFill(ucd(msg), desc)
            print(msg)
def _search(self, query):
# http://copr.fedorainfracloud.org/api/coprs/search/tests/
api_path = "/api/coprs/search/{}/".format(query)
res = self.base.urlopen(self.copr_url + api_path, mode='w+')
try:
json_parse = json.loads(res.read())
except ValueError:
raise dnf.exceptions.Error(_("Can't parse search for '{}'."
).format(query))
self._check_json_output(json_parse)
section_text = _("Matched: {}").format(query)
self._print_match_section(section_text)
        for repo in json_parse["repos"]:
            msg = "{0}/{1} : ".format(repo["username"], repo["coprname"])
            desc = repo["description"]
            if not desc:
                desc = _("No description given.")
            msg = self.base.output.fmtKeyValFill(ucd(msg), desc)
            print(msg)
def _print_match_section(self, text):
formatted = self.base.output.fmtSection(text)
print(formatted)
def _ask_user(self, question):
if self.base.conf.assumeyes and not self.base.conf.assumeno:
return
elif self.base.conf.assumeno and not self.base.conf.assumeyes:
raise dnf.exceptions.Error(_('Safe and good answer. Exiting.'))
answer = None
while not ((answer in YES) or (answer in NO)):
answer = ucd(input(question)).lower()
answer = _(answer)
if answer in YES:
return
else:
raise dnf.exceptions.Error(_('Safe and good answer. Exiting.'))
@classmethod
def _need_root(cls):
# FIXME this should do dnf itself (BZ#1062889)
if os.geteuid() != 0:
raise dnf.exceptions.Error(
_('This command has to be run under the root user.'))
@staticmethod
def _guess_chroot(chroot_config):
""" Guess which chroot is equivalent to this machine """
# FIXME Copr should generate non-specific arch repo
dist = chroot_config
if dist is None or (dist[0] is False) or (dist[1] is False):
dist = platform.linux_distribution()
if "Fedora" in dist:
# x86_64 because repo-file is same for all arch
# ($basearch is used)
if "Rawhide" in dist:
chroot = ("fedora-rawhide-x86_64")
else:
chroot = ("fedora-{}-x86_64".format(dist[1]))
elif "Mageia" in dist:
# Get distribution architecture (Mageia does not use $basearch)
distarch = rpm.expandMacro("%{distro_arch}")
# Set the chroot
if "Cauldron" in dist:
chroot = ("mageia-cauldron-{}".format(distarch))
else:
chroot = ("mageia-{0}-{1}".format(dist[1], distarch))
else:
chroot = ("epel-%s-x86_64" % dist[1].split(".", 1)[0])
return chroot
def _download_repo(self, project_name, repo_filename, chroot=None):
if chroot is None:
chroot = self._guess_chroot(self.chroot_config)
short_chroot = '-'.join(chroot.split('-')[:2])
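        # e.g. 'epel-7-x86_64' -> 'epel-7'; the arch suffix is dropped because
        # the served repo file selects the architecture via $basearch.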
#http://copr.fedorainfracloud.org/coprs/larsks/rcm/repo/epel-7-x86_64/
api_path = "/coprs/{0}/repo/{1}/".format(project_name, short_chroot)
try:
f = self.base.urlopen(self.copr_url + api_path, mode='w+')
except IOError as e:
if os.path.exists(repo_filename):
os.remove(repo_filename)
if '404' in str(e):
if PY3:
import urllib.request
try:
res = urllib.request.urlopen(self.copr_url + "/coprs/" + project_name)
status_code = res.getcode()
except urllib.error.HTTPError as e:
status_code = e.getcode()
else:
import urllib
res = urllib.urlopen(self.copr_url + "/coprs/" + project_name)
status_code = res.getcode()
if str(status_code) != '404':
raise dnf.exceptions.Error(_("This repository does not have"\
" any builds yet so you cannot enable it now."))
else:
raise dnf.exceptions.Error(_("Such repository does not exist."))
raise
shutil.copy2(f.name, repo_filename)
os.chmod(repo_filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
@classmethod
def _remove_repo(cls, repo_filename):
# FIXME is it Copr repo ?
try:
os.remove(repo_filename)
except OSError as e:
raise dnf.exceptions.Error(str(e))
@classmethod
def _disable_repo(cls, copr_username, copr_projectname):
exit_code = call(["dnf", "config-manager", "--set-disabled",
"{}-{}".format(
cls._sanitize_username(copr_username),
copr_projectname)])
if exit_code != 0:
raise dnf.exceptions.Error(
_("Failed to disable copr repo {}/{}"
.format(copr_username, copr_projectname)))
@classmethod
def _get_data(cls, f):
""" Wrapper around response from server
check data and print nice error in case of some error (and return None)
otherwise return json object.
"""
try:
output = json.loads(f.read())
        except ValueError:
            logger.critical(_("Unknown response from server."))
            return
return output
@classmethod
def _check_json_output(cls, json_obj):
if json_obj["output"] != "ok":
raise dnf.exceptions.Error("{}".format(json_obj["error"]))
@classmethod
def _sanitize_username(cls, copr_username):
if copr_username[0] == "@":
return "group_{}".format(copr_username[1:])
else:
return copr_username
@dnf.plugin.register_command
class PlaygroundCommand(CoprCommand):
""" Playground plugin for DNF """
aliases = ("playground",)
summary = _("Interact with Playground repository.")
usage = " [enable|disable|upgrade]"
def _cmd_enable(self, chroot):
self._need_root()
self._ask_user("""
You are about to enable a Playground repository.
Do you want to continue? [y/N]: """)
api_url = "{0}/api/playground/list/".format(
self.copr_url)
f = self.base.urlopen(api_url, mode="w+")
output = self._get_data(f)
f.close()
if output["output"] != "ok":
raise dnf.cli.CliError(_("Unknown response from server."))
for repo in output["repos"]:
project_name = "{0}/{1}".format(repo["username"],
repo["coprname"])
repo_filename = "{}/_playground_{}.repo".format(self.base.conf.get_reposdir, project_name.replace("/", "-"))
try:
if chroot not in repo["chroots"]:
continue
api_url = "{0}/api/coprs/{1}/detail/{2}/".format(
self.copr_url, project_name, chroot)
f = self.base.urlopen(api_url, mode='w+')
output2 = self._get_data(f)
f.close()
if (output2 and ("output" in output2)
and (output2["output"] == "ok")):
self._download_repo(project_name, repo_filename, chroot)
except dnf.exceptions.Error:
# likely 404 and that repo does not exist
pass
def _cmd_disable(self):
self._need_root()
for repo_filename in glob.glob("{}/_playground_*.repo".format(self.base.conf.get_reposdir)):
self._remove_repo(repo_filename)
@staticmethod
def set_argparser(parser):
parser.add_argument('subcommand', nargs=1,
choices=['enable', 'disable', 'upgrade'])
def run(self):
subcommand = self.opts.subcommand[0]
chroot = self._guess_chroot(self.chroot_config)
if subcommand == "enable":
self._cmd_enable(chroot)
logger.info(_("Playground repositories successfully enabled."))
elif subcommand == "disable":
self._cmd_disable()
logger.info(_("Playground repositories successfully disabled."))
elif subcommand == "upgrade":
self._cmd_disable()
self._cmd_enable(chroot)
logger.info(_("Playground repositories successfully updated."))
else:
raise dnf.exceptions.Error(
_('Unknown subcommand {}.').format(subcommand))
| gpl-2.0 |
jcsirot/kubespray | roles/kubernetes-apps/ansible/library/kube.py | 7 | 8019 | #!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
- Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
- The name associated with resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file.
kubectl:
required: false
default: null
description:
- The path to the kubectl bin
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
force:
required: false
default: false
description:
- A flag to indicate to force delete, replace, or stop.
all:
required: false
default: false
description:
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
      latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
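# Illustrative additions (not part of the original examples): per the state
# descriptions above, 'latest' replaces a resource that already exists and
# creates it otherwise, while 'reloaded' replaces it from its definition file.
- name: test nginx matches its definition file
  kube: name=nginx resource=rc filename=/tmp/nginx.yml state=latest
- name: test nginx is reloaded from its definition file
  kube: name=nginx resource=rc filename=/tmp/nginx.yml state=reloaded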
"""
class KubeManager(object):
def __init__(self, module):
self.module = module
self.kubectl = module.params.get('kubectl')
if self.kubectl is None:
self.kubectl = module.get_bin_path('kubectl', True)
self.base_cmd = [self.kubectl]
if module.params.get('server'):
self.base_cmd.append('--server=' + module.params.get('server'))
if module.params.get('log_level'):
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
if module.params.get('namespace'):
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
self.all = module.params.get('all')
self.force = module.params.get('force')
self.name = module.params.get('name')
self.filename = module.params.get('filename')
self.resource = module.params.get('resource')
self.label = module.params.get('label')
def _execute(self, cmd):
args = self.base_cmd + cmd
try:
rc, out, err = self.module.run_command(args)
if rc != 0:
self.module.fail_json(
msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
except Exception as exc:
self.module.fail_json(
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
return out.splitlines()
def _execute_nofail(self, cmd):
args = self.base_cmd + cmd
rc, out, err = self.module.run_command(args)
if rc != 0:
return None
return out.splitlines()
def create(self, check=True):
if check and self.exists():
return []
cmd = ['create']
if not self.filename:
self.module.fail_json(msg='filename required to create')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def replace(self):
if not self.force and not self.exists():
return []
cmd = ['replace']
if self.force:
cmd.append('--force')
if not self.filename:
self.module.fail_json(msg='filename required to reload')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def delete(self):
if not self.force and not self.exists():
return []
cmd = ['delete']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to delete without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def exists(self):
cmd = ['get']
if not self.resource:
return False
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
cmd.append('--no-headers')
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all-namespaces')
result = self._execute_nofail(cmd)
if not result:
return False
return True
def stop(self):
if not self.force and not self.exists():
return []
cmd = ['stop']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to stop without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
kubectl=dict(),
force=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
)
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create()
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
elif state == 'latest':
if manager.exists():
manager.force = True
result = manager.replace()
else:
result = manager.create(check=False)
else:
module.fail_json(msg='Unrecognized state %s.' % state)
if result:
changed = True
module.exit_json(changed=changed,
msg='success: %s' % (' '.join(result))
)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
| apache-2.0 |
Kamp9/scipy | scipy/io/tests/test_idl.py | 38 | 19231 | from __future__ import division, print_function, absolute_import
from os import path
from warnings import catch_warnings
DATA_PATH = path.join(path.dirname(__file__), 'data')
import numpy as np
from numpy.testing import (assert_equal, assert_array_equal, run_module_suite,
assert_)
from scipy.io.idl import readsav
def object_array(*args):
"""Constructs a numpy array of objects"""
array = np.empty(len(args), dtype=object)
for i in range(len(args)):
array[i] = args[i]
return array
def assert_identical(a, b):
"""Assert whether value AND type are the same"""
assert_equal(a, b)
if type(b) is str:
assert_equal(type(a), type(b))
else:
assert_equal(np.asarray(a).dtype.type, np.asarray(b).dtype.type)
def assert_array_identical(a, b):
"""Assert whether values AND type are the same"""
assert_array_equal(a, b)
assert_equal(a.dtype.type, b.dtype.type)
# Define vectorized ID function for pointer arrays
vect_id = np.vectorize(id)
class TestIdict:
def test_idict(self):
custom_dict = {'a': np.int16(999)}
original_id = id(custom_dict)
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), idict=custom_dict, verbose=False)
assert_equal(original_id, id(s))
assert_('a' in s)
assert_identical(s['a'], np.int16(999))
assert_identical(s['i8u'], np.uint8(234))
class TestScalars:
# Test that scalar values are read in with the correct value and type
def test_byte(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_int16(self):
s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
assert_identical(s.i16s, np.int16(-23456))
def test_int32(self):
s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
assert_identical(s.i32s, np.int32(-1234567890))
def test_float32(self):
s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
assert_identical(s.f32, np.float32(-3.1234567e+37))
def test_float64(self):
s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
def test_complex32(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))
def test_bytes(self):
s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
assert_identical(s.s, np.bytes_("The quick brown fox jumps over the lazy python"))
def test_structure(self):
pass
def test_complex64(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
def test_heap_pointer(self):
pass
def test_object_reference(self):
pass
def test_uint16(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
assert_identical(s.i16u, np.uint16(65511))
def test_uint32(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
assert_identical(s.i32u, np.uint32(4294967233))
def test_int64(self):
s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
assert_identical(s.i64s, np.int64(-9223372036854774567))
def test_uint64(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
assert_identical(s.i64u, np.uint64(18446744073709529285))
class TestCompressed(TestScalars):
# Test that compressed .sav files can be read in
def test_compressed(self):
s = readsav(path.join(DATA_PATH, 'various_compressed.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
assert_identical(s.f32, np.float32(-3.1234567e+37))
assert_identical(s.c64, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
class TestArrayDimensions:
# Test that multi-dimensional arrays are read in with the correct dimensions
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
class TestStructures:
def test_scalars(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars.sav'), verbose=False)
assert_identical(s.scalars.a, np.array(np.int16(1)))
assert_identical(s.scalars.b, np.array(np.int32(2)))
assert_identical(s.scalars.c, np.array(np.float32(3.)))
assert_identical(s.scalars.d, np.array(np.float64(4.)))
assert_identical(s.scalars.e, np.array([b"spam"], dtype=object))
assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j)))
def test_scalars_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 5))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5))
def test_scalars_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_scalars_replicated_3d.sav'), verbose=False)
assert_identical(s.scalars_rep.a, np.repeat(np.int16(1), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2))
assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(object))
assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2))
def test_arrays(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays.sav'), verbose=False)
assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32))
assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)]))
assert_array_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=object))
def test_arrays_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.a.dtype.type is np.object_)
assert_(s.arrays_rep.b.dtype.type is np.object_)
assert_(s.arrays_rep.c.dtype.type is np.object_)
assert_(s.arrays_rep.d.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.a.shape, (5, ))
assert_equal(s.arrays_rep.b.shape, (5, ))
assert_equal(s.arrays_rep.c.shape, (5, ))
assert_equal(s.arrays_rep.d.shape, (5, ))
# Check values
for i in range(5):
assert_array_identical(s.arrays_rep.a[i],
np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays_rep.b[i],
np.array([4., 5., 6., 7.], dtype=np.float32))
assert_array_identical(s.arrays_rep.c[i],
np.array([np.complex64(1+2j),
np.complex64(7+8j)]))
assert_array_identical(s.arrays_rep.d[i],
np.array([b"cheese", b"bacon", b"spam"],
dtype=object))
def test_arrays_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_arrays_replicated_3d.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.a.dtype.type is np.object_)
assert_(s.arrays_rep.b.dtype.type is np.object_)
assert_(s.arrays_rep.c.dtype.type is np.object_)
assert_(s.arrays_rep.d.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.a.shape, (4, 3, 2))
assert_equal(s.arrays_rep.b.shape, (4, 3, 2))
assert_equal(s.arrays_rep.c.shape, (4, 3, 2))
assert_equal(s.arrays_rep.d.shape, (4, 3, 2))
# Check values
for i in range(4):
for j in range(3):
for k in range(2):
assert_array_identical(s.arrays_rep.a[i, j, k],
np.array([1, 2, 3], dtype=np.int16))
assert_array_identical(s.arrays_rep.b[i, j, k],
np.array([4., 5., 6., 7.],
dtype=np.float32))
assert_array_identical(s.arrays_rep.c[i, j, k],
np.array([np.complex64(1+2j),
np.complex64(7+8j)]))
assert_array_identical(s.arrays_rep.d[i, j, k],
np.array([b"cheese", b"bacon", b"spam"],
dtype=object))
def test_inheritance(self):
s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False)
assert_identical(s.fc.x, np.array([0], dtype=np.int16))
assert_identical(s.fc.y, np.array([0], dtype=np.int16))
assert_identical(s.fc.r, np.array([0], dtype=np.int16))
assert_identical(s.fc.c, np.array([4], dtype=np.int16))
class TestPointers:
# Check that pointers in .sav files produce references to the same object in Python
def test_pointers(self):
s = readsav(path.join(DATA_PATH, 'scalar_heap_pointer.sav'), verbose=False)
assert_identical(s.c64_pointer1, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_identical(s.c64_pointer2, np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j))
assert_(s.c64_pointer1 is s.c64_pointer2)
class TestPointerArray:
# Test that pointers in arrays are correctly read in
def test_1d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_1d.sav'), verbose=False)
assert_equal(s.array1d.shape, (123, ))
assert_(np.all(s.array1d == np.float32(4.)))
assert_(np.all(vect_id(s.array1d) == id(s.array1d[0])))
def test_2d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_2d.sav'), verbose=False)
assert_equal(s.array2d.shape, (22, 12))
assert_(np.all(s.array2d == np.float32(4.)))
assert_(np.all(vect_id(s.array2d) == id(s.array2d[0,0])))
def test_3d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_3d.sav'), verbose=False)
assert_equal(s.array3d.shape, (11, 22, 12))
assert_(np.all(s.array3d == np.float32(4.)))
assert_(np.all(vect_id(s.array3d) == id(s.array3d[0,0,0])))
def test_4d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_4d.sav'), verbose=False)
assert_equal(s.array4d.shape, (4, 5, 8, 7))
assert_(np.all(s.array4d == np.float32(4.)))
assert_(np.all(vect_id(s.array4d) == id(s.array4d[0,0,0,0])))
def test_5d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_5d.sav'), verbose=False)
assert_equal(s.array5d.shape, (4, 3, 4, 6, 5))
assert_(np.all(s.array5d == np.float32(4.)))
assert_(np.all(vect_id(s.array5d) == id(s.array5d[0,0,0,0,0])))
def test_6d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_6d.sav'), verbose=False)
assert_equal(s.array6d.shape, (3, 6, 4, 5, 3, 4))
assert_(np.all(s.array6d == np.float32(4.)))
assert_(np.all(vect_id(s.array6d) == id(s.array6d[0,0,0,0,0,0])))
def test_7d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_7d.sav'), verbose=False)
assert_equal(s.array7d.shape, (2, 1, 2, 3, 4, 3, 2))
assert_(np.all(s.array7d == np.float32(4.)))
assert_(np.all(vect_id(s.array7d) == id(s.array7d[0,0,0,0,0,0,0])))
def test_8d(self):
s = readsav(path.join(DATA_PATH, 'array_float32_pointer_8d.sav'), verbose=False)
assert_equal(s.array8d.shape, (4, 3, 2, 1, 2, 3, 5, 4))
assert_(np.all(s.array8d == np.float32(4.)))
assert_(np.all(vect_id(s.array8d) == id(s.array8d[0,0,0,0,0,0,0,0])))
class TestPointerStructures:
# Test that structures are correctly read in
def test_scalars(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers.sav'), verbose=False)
assert_identical(s.pointers.g, np.array(np.float32(4.), dtype=np.object_))
assert_identical(s.pointers.h, np.array(np.float32(4.), dtype=np.object_))
assert_(id(s.pointers.g[0]) == id(s.pointers.h[0]))
def test_pointers_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated.sav'), verbose=False)
assert_identical(s.pointers_rep.g, np.repeat(np.float32(4.), 5).astype(np.object_))
assert_identical(s.pointers_rep.h, np.repeat(np.float32(4.), 5).astype(np.object_))
assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
def test_pointers_replicated_3d(self):
s = readsav(path.join(DATA_PATH, 'struct_pointers_replicated_3d.sav'), verbose=False)
s_expect = np.repeat(np.float32(4.), 24).reshape(4, 3, 2).astype(np.object_)
assert_identical(s.pointers_rep.g, s_expect)
assert_identical(s.pointers_rep.h, s_expect)
assert_(np.all(vect_id(s.pointers_rep.g) == vect_id(s.pointers_rep.h)))
def test_arrays(self):
s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
def test_arrays_replicated(self):
s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays_replicated.sav'), verbose=False)
# Check column types
assert_(s.arrays_rep.g.dtype.type is np.object_)
assert_(s.arrays_rep.h.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.g.shape, (5, ))
assert_equal(s.arrays_rep.h.shape, (5, ))
# Check values
for i in range(5):
assert_array_identical(s.arrays_rep.g[i], np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays_rep.h[i], np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays_rep.g[i]) == id(s.arrays_rep.g[0][0])))
assert_(np.all(vect_id(s.arrays_rep.h[i]) == id(s.arrays_rep.h[0][0])))
def test_arrays_replicated_3d(self):
pth = path.join(DATA_PATH, 'struct_pointer_arrays_replicated_3d.sav')
s = readsav(pth, verbose=False)
# Check column types
assert_(s.arrays_rep.g.dtype.type is np.object_)
assert_(s.arrays_rep.h.dtype.type is np.object_)
# Check column shapes
assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
assert_equal(s.arrays_rep.h.shape, (4, 3, 2))
# Check values
for i in range(4):
for j in range(3):
for k in range(2):
assert_array_identical(s.arrays_rep.g[i, j, k],
np.repeat(np.float32(4.), 2).astype(np.object_))
assert_array_identical(s.arrays_rep.h[i, j, k],
np.repeat(np.float32(4.), 3).astype(np.object_))
assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0])))
assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0])))
class TestTags:
    '''Test that sav files with a description tag can be read at all'''
def test_description(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte_descr.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_null_pointer():
# Regression test for null pointers.
s = readsav(path.join(DATA_PATH, 'null_pointer.sav'), verbose=False)
assert_identical(s.point, None)
assert_identical(s.check, np.int16(5))
def test_invalid_pointer():
# Regression test for invalid pointers (gh-4613).
# In some files in the wild, pointers can sometimes refer to a heap
# variable that does not exist. In that case, we now gracefully fail for
# that variable and replace the variable with None and emit a warning.
# Since it's difficult to artificially produce such files, the file used
# here has been edited to force the pointer reference to be invalid.
with catch_warnings(record=True) as w:
s = readsav(path.join(DATA_PATH, 'invalid_pointer.sav'), verbose=False)
assert_(len(w) == 1)
assert_(str(w[0].message) == ("Variable referenced by pointer not found in "
"heap: variable will be set to None"))
assert_identical(s['a'], np.array([None, None]))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
BeegorMif/HTPC-Manager | lib/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampled from about 20M of text material, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
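# Worked arithmetic behind the constants above (comment-only sanity check):
#   ideal   = 0.92635 / (1 - 0.92635)           ~= 12.58
#   random  = 512 / (2965 + 62 + 83 + 86 - 512) ~= 0.191
#   typical = 25% of ideal ~= 3.14, rounded down to the 3.0 used here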
# Char to FreqOrder table
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
# Everything below is of no interest for detection purposes
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
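# A minimal sketch (not part of the original table module) of how a
# char-to-frequency-order table like the one above is typically consumed
# by a chardet-style distribution analyser: characters whose rank falls
# inside the most common block (the "last 512" boundary marked above)
# count as typical, and the typical/total ratio drives the confidence
# score. All names below are hypothetical, for illustration only.
def _sketch_typicality_ratio(char_to_freq_order, observed_indices,
                             typical_cutoff=512):
    """Return the share of observed characters with a typical rank."""
    total = 0
    typical = 0
    for idx in observed_indices:
        total += 1
        if 0 <= char_to_freq_order[idx] < typical_cutoff:
            typical += 1
    return float(typical) / total if total else 0.0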
| gpl-3.0 |
jkonecki/autorest | ClientRuntimes/Python/msrest/doc/conf.py | 2 | 7620 | # -*- coding: utf-8 -*-
#
# msrest documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 27 15:42:45 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pip
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest',
'sphinx.ext.viewcode', 'sphinx.ext.intersphinx']
intersphinx_mapping = {
'python': ('https://docs.python.org/3.5', None),
}
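# A hedged illustration (comment only, not consumed from this conf.py):
# with the mapping above, cross-references in the .rst sources such as
#
#     :class:`dict` or :func:`json.dumps`
#
# resolve via intersphinx to the Python 3.5 docs at
# https://docs.python.org/3.5/.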
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'msrest'
copyright = u'2016, Microsoft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for extensions ----------------------------------------------------
autoclass_content = 'both'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
#html_theme_options = {'collapsiblesidebar': True}
# Activate the theme.
#pip.main(['install', 'sphinx_bootstrap_theme'])
#import sphinx_bootstrap_theme
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'msrest-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'msrest.tex', u'msrest Documentation',
u'Microsoft', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
| mit |
mapr/hue | desktop/core/ext-py/Django-1.6.10/django/utils/encoding.py | 92 | 9512 | from __future__ import unicode_literals
import codecs
import datetime
from decimal import Decimal
import locale
import warnings
from django.utils.functional import Promise
from django.utils import six
from django.utils.six.moves.urllib.parse import quote
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class that derives __str__ from __unicode__.
On Python 2, __str__ returns the output of __unicode__ encoded as a UTF-8
bytestring. On Python 3, __str__ returns the output of __unicode__.
Useful as a mix-in. If you support Python 2 and 3 with a single code base,
you can inherit this mix-in and just define __unicode__.
"""
def __init__(self, *args, **kwargs):
warnings.warn("StrAndUnicode is deprecated. Define a __str__ method "
"and apply the @python_2_unicode_compatible decorator "
"instead.", DeprecationWarning, stacklevel=2)
super(StrAndUnicode, self).__init__(*args, **kwargs)
if six.PY3:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode('utf-8')
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if six.PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
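def _demo_python_2_unicode_compatible():
    # A minimal usage sketch (hypothetical helper, not part of Django):
    # define only __str__ returning text and let the decorator wire up
    # __unicode__/__str__ appropriately per Python version.
    @python_2_unicode_compatible
    class Greeting(object):
        def __str__(self):
            return 'hello'  # text, thanks to unicode_literals at the top
    return str(Greeting())  # native str on both Python 2 and 3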
def smart_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, six.integer_types + (type(None), float, Decimal,
datetime.datetime, datetime.date, datetime.time))
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first, saves 30-40% when s is an instance of
# six.text_type. This function gets called often in that setting.
if isinstance(s, six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, six.string_types):
if hasattr(s, '__unicode__'):
s = s.__unicode__()
else:
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_text(arg, encoding, strings_only,
errors) for arg in s])
return s
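def _demo_force_text():
    # A minimal sketch (hypothetical helper, illustration only) of
    # smart_text vs force_text: both decode bytes, and they differ only
    # for lazy translation objects -- smart_text passes a Promise through
    # untouched, while force_text resolves it to real text.
    assert force_text(b'caf\xc3\xa9') == u'caf\xe9'
    assert force_text(42) == u'42'
    assert force_text(None, strings_only=True) is None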
def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, six.memoryview):
s = bytes(s)
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and (s is None or isinstance(s, int)):
return s
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return b' '.join([force_bytes(arg, encoding, strings_only,
errors) for arg in s])
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
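def _demo_force_bytes():
    # A minimal sketch (hypothetical helper, illustration only) of the
    # bytes-side counterpart: text is encoded, and UTF-8 bytes input is
    # passed through unchanged.
    assert force_bytes(u'caf\xe9') == b'caf\xc3\xa9'
    assert force_bytes(b'raw') == b'raw'
    assert force_bytes(u'caf\xe9', encoding='latin-1') == b'caf\xe9'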
if six.PY3:
smart_str = smart_text
force_str = force_text
else:
smart_str = smart_bytes
force_str = force_bytes
# backwards compatibility for Python 2
smart_unicode = smart_text
force_unicode = force_text
smart_str.__doc__ = """\
Apply smart_text in Python 3 and smart_bytes in Python 2.
This is suitable for writing to sys.stdout (for instance).
"""
force_str.__doc__ = """\
Apply force_text in Python 3 and force_bytes in Python 2.
"""
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return quote(force_bytes(iri), safe=b"/#%[]=:;$&()+,!?*@'~")
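def _demo_iri_to_uri():
    # A minimal sketch (hypothetical helper, illustration only):
    # non-ASCII characters are percent-encoded as UTF-8 octets, while
    # reserved URL syntax such as '/', '?' and '=' is left intact.
    assert iri_to_uri(u'/caf\xe9/?q=1') == '/caf%C3%A9/?q=1'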
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(force_bytes(path).replace(b"\\", b"/"), safe=b"/~!*()'")
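def _demo_filepath_to_uri():
    # A minimal sketch (hypothetical helper, illustration only):
    # backslashes become slashes and spaces are percent-encoded, while
    # characters such as ( ) ' ~ stay literal per the safe set above.
    assert (filepath_to_uri(u'docs\\r\xe9sum\xe9 (1).txt')
            == 'docs/r%C3%A9sum%C3%A9%20(1).txt')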
def get_system_encoding():
"""
The encoding of the default system locale, falling back to 'ascii' if
the encoding is unsupported by Python or cannot be determined. See
tickets #10335 and #5846.
"""
try:
encoding = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(encoding)
except Exception:
encoding = 'ascii'
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
| apache-2.0 |
CiscoUcs/Ironic-UCS | build/lib.linux-x86_64-2.7/ironic/openstack/common/jsonutils.py | 7 | 6832 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import codecs
import datetime
import functools
import inspect
import itertools
import sys
if sys.version_info < (2, 7):
# On Python <= 2.6, json module is not C boosted, so try to use
# simplejson module if available
try:
import simplejson as json
except ImportError:
import json
else:
import json
import six
import six.moves.xmlrpc_client as xmlrpclib
from ironic.openstack.common import gettextutils
from ironic.openstack.common import importutils
from ironic.openstack.common import strutils
from ironic.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in an infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created its own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def dump(obj, fp, *args, **kwargs):
return json.dump(obj, fp, *args, **kwargs)
def loads(s, encoding='utf-8', **kwargs):
return json.loads(strutils.safe_decode(s, encoding), **kwargs)
def load(fp, encoding='utf-8', **kwargs):
return json.load(codecs.getreader(encoding)(fp), **kwargs)
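def _demo_dumps():
    # A minimal sketch (hypothetical helper, illustration only): dumps()
    # survives values that plain json.dumps would reject, because
    # default=to_primitive converts them first -- datetimes become
    # strtime strings, and sets and other iterables become lists.
    payload = {'when': datetime.datetime(2014, 6, 27, 15, 42),
               'tags': set(['a', 'b'])}
    text = dumps(payload)  # plain json.dumps(payload) raises TypeError
    return loads(text)     # round-trips as plain dicts/lists/strings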
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
| apache-2.0 |
sogis/Quantum-GIS | python/plugins/processing/core/ProcessingLog.py | 5 | 9837 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingLog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import re
import os
import codecs
import datetime
from processing.tools.system import userFolder
from processing.core.ProcessingConfig import ProcessingConfig
from qgis.core import *
class ProcessingLog:
LOG_ERROR = 'ERROR'
LOG_INFO = 'INFO'
LOG_WARNING = 'WARNING'
LOG_ALGORITHM = 'ALGORITHM'
DATE_FORMAT = u'%a %b %d %Y %H:%M:%S'.encode('utf-8')
recentAlgs = []
@staticmethod
def startLogging():
if os.path.isfile(ProcessingLog.logFilename()):
logfile = codecs.open(ProcessingLog.logFilename(), 'a',
encoding='utf-8')
else:
logfile = codecs.open(ProcessingLog.logFilename(), 'w',
encoding='utf-8')
logfile.write('Started logging at ' +
datetime.datetime.now().strftime(
ProcessingLog.DATE_FORMAT).decode('utf-8') + '\n')
logfile.close()
@staticmethod
def logFilename():
batchfile = userFolder() + os.sep + 'processing.log'
return batchfile
@staticmethod
def addToLog(msgtype, msg):
try:
# It seems that this fails sometimes depending on the msg
# added. To avoid it stopping the normal functioning of the
# algorithm, we catch all errors, assuming that it is better
# to miss some log info than to break the algorithm.
if msgtype == ProcessingLog.LOG_ALGORITHM:
line = msgtype + '|' + datetime.datetime.now().strftime(
ProcessingLog.DATE_FORMAT).decode('utf-8') + '|' \
+ msg + '\n'
logfile = codecs.open(ProcessingLog.logFilename(), 'a',
encoding='utf-8')
logfile.write(line)
logfile.close()
algname = msg[len('Processing.runalg("'):]
algname = algname[:algname.index('"')]
if algname not in ProcessingLog.recentAlgs:
ProcessingLog.recentAlgs.append(algname)
recentAlgsString = ';'.join(ProcessingLog.recentAlgs[-6:])
ProcessingConfig.setSettingValue(
ProcessingConfig.RECENT_ALGORITHMS,
recentAlgsString)
else:
if isinstance(msg, list):
msg = '\n'.join([m for m in msg])
msgtypes = {ProcessingLog.LOG_ERROR: QgsMessageLog.CRITICAL,
ProcessingLog.LOG_INFO: QgsMessageLog.INFO,
ProcessingLog.LOG_WARNING: QgsMessageLog.WARNING, }
QgsMessageLog.logMessage(msg, "Processing", msgtypes[msgtype])
except:
pass
@staticmethod
def getLogEntries():
entries = {}
errors = []
algorithms = []
warnings = []
info = []
lines = tail(ProcessingLog.logFilename())
for line in lines:
line = line.strip('\n').strip()
tokens = line.split('|')
text = ''
for i in range(2, len(tokens)):
text += tokens[i] + '|'
if line.startswith(ProcessingLog.LOG_ERROR):
errors.append(LogEntry(tokens[1], text))
elif line.startswith(ProcessingLog.LOG_ALGORITHM):
algorithms.append(LogEntry(tokens[1], tokens[2]))
elif line.startswith(ProcessingLog.LOG_WARNING):
warnings.append(LogEntry(tokens[1], text))
elif line.startswith(ProcessingLog.LOG_INFO):
info.append(LogEntry(tokens[1], text))
entries[ProcessingLog.LOG_ALGORITHM] = algorithms
return entries
@staticmethod
def getRecentAlgorithms():
recentAlgsSetting = ProcessingConfig.getSetting(
ProcessingConfig.RECENT_ALGORITHMS)
try:
ProcessingLog.recentAlgs = recentAlgsSetting.split(';')
except:
pass
return ProcessingLog.recentAlgs
@staticmethod
def clearLog():
os.unlink(ProcessingLog.logFilename())
ProcessingLog.startLogging()
@staticmethod
def saveLog(fileName):
entries = ProcessingLog.getLogEntries()
with codecs.open(fileName, 'w', encoding='utf-8') as f:
for k, v in entries.iteritems():
for entry in v:
f.write('%s|%s|%s\n' % (k, entry.date, entry.text))
class LogEntry:
def __init__(self, date, text):
self.date = date
self.text = text
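# Example of the pipe-separated line format written by addToLog and
# parsed back by getLogEntries (hypothetical algorithm call):
#
#   ALGORITHM|Mon Aug 06 2012 10:12:01|Processing.runalg("qgis:buffer",...)
#
# Each line splits on '|': tokens[0] is the message type, tokens[1] the
# date, and the remaining tokens form the message text.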
"""
***************************************************************************
This code has been taken from pytailer
http://code.google.com/p/pytailer/
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
***************************************************************************
"""
class Tailer(object):
"""Implements tailing and heading functionality like GNU tail and
head commands.
"""
line_terminators = ('\r\n', '\n', '\r')
def __init__(self, filename, read_size=1024, end=False):
self.read_size = read_size
self.file = codecs.open(filename, encoding='utf-8')
self.start_pos = self.file.tell()
if end:
self.seek_end()
def splitlines(self, data):
return re.split('|'.join(self.line_terminators), data)
def seek_end(self):
self.seek(0, 2)
def seek(self, pos, whence=0):
self.file.seek(pos, whence)
def read(self, read_size=None):
if read_size:
read_str = self.file.read(read_size)
else:
read_str = self.file.read()
return (len(read_str), read_str)
def seek_line(self):
"""Searches backwards from the current file position for a
line terminator and seeks to the charachter after it.
"""
pos = end_pos = self.file.tell()
read_size = self.read_size
if pos > read_size:
pos -= read_size
else:
pos = 0
read_size = end_pos
self.seek(pos)
(bytes_read, read_str) = self.read(read_size)
if bytes_read and read_str[-1] in self.line_terminators:
# The last character is a line terminator, don't count
# this one.
bytes_read -= 1
if read_str[-2:] == '\r\n' and '\r\n' in self.line_terminators:
# found CRLF
bytes_read -= 1
while bytes_read > 0:
# Scan backward, counting the newlines in this bufferful
i = bytes_read - 1
while i >= 0:
if read_str[i] in self.line_terminators:
self.seek(pos + i + 1)
return self.file.tell()
i -= 1
if pos == 0 or pos - self.read_size < 0:
# Not enough lines in the buffer, send the whole file
self.seek(0)
return None
pos -= self.read_size
self.seek(pos)
(bytes_read, read_str) = self.read(self.read_size)
return None
def tail(self, lines=10):
"""Return the last lines of the file.
"""
self.seek_end()
end_pos = self.file.tell()
for i in xrange(lines):
if not self.seek_line():
break
data = self.file.read(end_pos - self.file.tell() - 1)
if data:
return self.splitlines(data)
else:
return []
def __iter__(self):
# the original pytailer defines follow() for live tailing; it is not
# included in this trimmed copy, so iterating a Tailer is unsupported
return self.follow()
def close(self):
self.file.close()
def tail(file, lines=200):
return Tailer(file).tail(lines)
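# Minimal usage sketch (assuming 'processing.log' exists and is UTF-8
# encoded):
#
#   t = Tailer('processing.log')
#   last_lines = t.tail(5)  # up to 5 trailing lines, oldest first
#   t.close()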
| gpl-2.0 |
moble/sympy | sympy/polys/domains/tests/test_polynomialring.py | 99 | 3314 | """Tests for the PolynomialRing classes. """
from sympy.polys.domains import QQ, ZZ
from sympy.polys.polyerrors import ExactQuotientFailed, CoercionFailed, NotReversible
from sympy.abc import x, y
from sympy.utilities.pytest import raises
def test_build_order():
R = QQ.old_poly_ring(x, y, order=(("lex", x), ("ilex", y)))
assert R.order((1, 5)) == ((1,), (-5,))
def test_globalring():
Qxy = QQ.old_frac_field(x, y)
R = QQ.old_poly_ring(x, y)
X = R.convert(x)
Y = R.convert(y)
assert x in R
assert 1/x not in R
assert 1/(1 + x) not in R
assert Y in R
assert X.ring == R
assert X * (Y**2 + 1) == R.convert(x * (y**2 + 1))
assert X * y == X * Y == R.convert(x * y) == x * Y
assert X + y == X + Y == R.convert(x + y) == x + Y
assert X - y == X - Y == R.convert(x - y) == x - Y
assert X + 1 == R.convert(x + 1)
raises(ExactQuotientFailed, lambda: X/Y)
raises(ExactQuotientFailed, lambda: x/Y)
raises(ExactQuotientFailed, lambda: X/y)
assert X**2 / X == X
assert R.from_GlobalPolynomialRing(ZZ.old_poly_ring(x, y).convert(x), ZZ.old_poly_ring(x, y)) == X
assert R.from_FractionField(Qxy.convert(x), Qxy) == X
assert R.from_FractionField(Qxy.convert(x)/y, Qxy) is None
assert R._sdm_to_vector(R._vector_to_sdm([X, Y], R.order), 2) == [X, Y]
def test_localring():
Qxy = QQ.old_frac_field(x, y)
R = QQ.old_poly_ring(x, y, order="ilex")
X = R.convert(x)
Y = R.convert(y)
assert x in R
assert 1/x not in R
assert 1/(1 + x) in R
assert Y in R
assert X.ring == R
assert X*(Y**2 + 1)/(1 + X) == R.convert(x*(y**2 + 1)/(1 + x))
assert X*y == X*Y
raises(ExactQuotientFailed, lambda: X/Y)
raises(ExactQuotientFailed, lambda: x/Y)
raises(ExactQuotientFailed, lambda: X/y)
assert X + y == X + Y == R.convert(x + y) == x + Y
assert X - y == X - Y == R.convert(x - y) == x - Y
assert X + 1 == R.convert(x + 1)
assert X**2 / X == X
assert R.from_GlobalPolynomialRing(ZZ.old_poly_ring(x, y).convert(x), ZZ.old_poly_ring(x, y)) == X
assert R.from_FractionField(Qxy.convert(x), Qxy) == X
raises(CoercionFailed, lambda: R.from_FractionField(Qxy.convert(x)/y, Qxy))
raises(ExactQuotientFailed, lambda: X/Y)
raises(NotReversible, lambda: X.invert())
assert R._sdm_to_vector(
R._vector_to_sdm([X/(X + 1), Y/(1 + X*Y)], R.order), 2) == \
[X*(1 + X*Y), Y*(1 + X)]
def test_conversion():
L = QQ.old_poly_ring(x, y, order="ilex")
G = QQ.old_poly_ring(x, y)
assert L.convert(x) == L.convert(G.convert(x), G)
assert G.convert(x) == G.convert(L.convert(x), L)
raises(CoercionFailed, lambda: G.convert(L.convert(1/(1 + x)), L))
def test_units():
R = QQ.old_poly_ring(x)
assert R.is_unit(R.convert(1))
assert R.is_unit(R.convert(2))
assert not R.is_unit(R.convert(x))
assert not R.is_unit(R.convert(1 + x))
R = QQ.old_poly_ring(x, order='ilex')
assert R.is_unit(R.convert(1))
assert R.is_unit(R.convert(2))
assert not R.is_unit(R.convert(x))
assert R.is_unit(R.convert(1 + x))
R = ZZ.old_poly_ring(x)
assert R.is_unit(R.convert(1))
assert not R.is_unit(R.convert(2))
assert not R.is_unit(R.convert(x))
assert not R.is_unit(R.convert(1 + x))
| bsd-3-clause |
nwiizo/workspace_2017 | ansible-modules-core/network/eos/eos_eapi.py | 13 | 13144 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'core',
'version': '1.0'}
DOCUMENTATION = """
---
module: eos_eapi
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage and configure Arista EOS eAPI.
requirements:
- "EOS v4.12 or greater"
description:
- Use to enable or disable eAPI access, and set the port and state
of http, https, local_http and unix-socket servers.
- When enabling eAPI access the default is to enable HTTP on port
80, enable HTTPS on port 443, disable local HTTP, and disable
Unix socket server. Use the options listed below to override the
default configuration.
- Requires EOS v4.12 or greater.
extends_documentation_fragment: eos
options:
http:
description:
- The C(http) argument controls the operating state of the HTTP
transport protocol when eAPI is present in the running-config.
When the value is set to True, the HTTP protocol is enabled and
when the value is set to False, the HTTP protocol is disabled.
By default, when eAPI is first configured, the HTTP protocol is
disabled.
required: false
default: no
choices: ['yes', 'no']
aliases: ['enable_http']
http_port:
description:
- Configures the HTTP port that will listen for connections when
the HTTP transport protocol is enabled. This argument accepts
integer values in the valid range of 1 to 65535.
required: false
default: 80
https:
description:
- The C(https) argument controls the operating state of the HTTPS
transport protocol when eAPI is present in the running-config.
When the value is set to True, the HTTPS protocol is enabled and
when the value is set to False, the HTTPS protocol is disabled.
By default, when eAPI is first configured, the HTTPS protocol is
enabled.
required: false
default: yes
choices: ['yes', 'no']
aliases: ['enable_https']
https_port:
description:
- Configures the HTTPS port that will listen for connections when
the HTTPS transport protocol is enabled. This argument accepts
integer values in the valid range of 1 to 65535.
required: false
default: 443
local_http:
description:
- The C(local_http) argument controls the operating state of the
local HTTP transport protocol when eAPI is present in the
running-config. When the value is set to True, the HTTP protocol
is enabled and restricted to connections from localhost only. When
the value is set to False, the HTTP local protocol is disabled.
- Note this value is independent of the C(http) argument
required: false
default: false
choices: ['yes', 'no']
aliases: ['enable_local_http']
local_http_port:
description:
- Configures the port that the local HTTP server will listen on
when the local HTTP transport protocol is enabled. This argument
accepts integer values in the valid range of 1 to 65535.
required: false
default: 8080
socket:
description:
- The C(socket) argument controls the operating state of the UNIX
Domain Socket used to receive eAPI requests. When the value
of this argument is set to True, the UDS will listen for eAPI
requests. When the value is set to False, the UDS will not be
available to handle requests. By default when eAPI is first
configured, the UDS is disabled.
required: false
default: false
choices: ['yes', 'no']
aliases: ['enable_socket']
vrf:
description:
- The C(vrf) argument will configure eAPI to listen for connections
in the specified VRF. By default, eAPI transports will listen
for connections in the global table. This value requires the
VRF to already be created otherwise the task will fail.
required: false
default: default
version_added: "2.2"
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
version_added: "2.2"
state:
description:
- The C(state) argument controls the operational state of eAPI
on the remote device. When this argument is set to C(started),
eAPI is enabled to receive requests and when this argument is
C(stopped), eAPI is disabled and will not receive requests.
required: false
default: started
choices: ['started', 'stopped']
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: admin
password: admin
- name: Enable eAPI access with default configuration
eos_eapi:
state: started
provider: "{{ cli }}"
- name: Enable eAPI with no HTTP, HTTPS at port 9443, local HTTP at port 80, and socket enabled
eos_eapi:
state: started
http: false
https_port: 9443
local_http: yes
local_http_port: 80
socket: yes
provider: "{{ cli }}"
- name: Shutdown eAPI access
eos_eapi:
state: stopped
provider: "{{ cli }}"
"""
RETURN = """
updates:
description:
- Set of commands to be executed on remote device
returned: always
type: list
sample: ['management api http-commands', 'shutdown']
urls:
description: Hash of URL endpoints eAPI is listening on per interface
returned: when eAPI is started
type: dict
sample: {'Management1': ['http://172.26.10.1:80']}
"""
import re
import time
import ansible.module_utils.eos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
PRIVATE_KEYS_RE = re.compile('__.+__')
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
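# e.g. invoke('set_protocol_http', module, instance, commands) looks up
# the function of that name in module globals and calls it; unknown
# names are silently ignored and return None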
def get_instance(module):
try:
resp = module.cli('show management api http-commands', 'json')
return dict(
http=resp[0]['httpServer']['configured'],
http_port=resp[0]['httpServer']['port'],
https=resp[0]['httpsServer']['configured'],
https_port=resp[0]['httpsServer']['port'],
local_http=resp[0]['localHttpServer']['configured'],
local_http_port=resp[0]['localHttpServer']['port'],
socket=resp[0]['unixSocketServer']['configured'],
vrf=resp[0]['vrf']
)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
def started(module, instance, commands):
commands.append('no shutdown')
setters = set()
for key, value in module.argument_spec.iteritems():
if module.params[key] is not None:
setter = value.get('setter') or 'set_%s' % key
if setter not in setters:
setters.add(setter)
invoke(setter, module, instance, commands)
def stopped(module, instance, commands):
commands.append('shutdown')
def set_protocol_http(module, instance, commands):
port = module.params['http_port']
if not 1 <= port <= 65535:
module.fail_json(msg='http_port must be between 1 and 65535')
elif any((module.params['http'], instance['http'])):
commands.append('protocol http port %s' % port)
elif module.params['http'] is False:
commands.append('no protocol http')
def set_protocol_https(module, instance, commands):
port = module.params['https_port']
if not 1 <= port <= 65535:
module.fail_json(msg='https_port must be between 1 and 65535')
elif any((module.params['https'], instance['https'])):
commands.append('protocol https port %s' % port)
elif module.params['https'] is False:
commands.append('no protocol https')
def set_local_http(module, instance, commands):
port = module.params['local_http_port']
if not 1 <= port <= 65535:
module.fail_json(msg='local_http_port must be between 1 and 65535')
elif any((module.params['local_http'], instance['local_http'])):
commands.append('protocol http localhost port %s' % port)
elif module.params['local_http'] is False:
commands.append('no protocol http localhost port 8080')
def set_socket(module, instance, commands):
if any((module.params['socket'], instance['socket'])):
commands.append('protocol unix-socket')
elif module.params['socket'] is False:
commands.append('no protocol unix-socket')
def set_vrf(module, instance, commands):
vrf = module.params['vrf']
if vrf != 'default':
resp = module.cli(['show vrf'])
if vrf not in resp[0]:
module.fail_json(msg="vrf '%s' is not configured" % vrf)
commands.append('vrf %s' % vrf)
def get_config(module):
contents = module.params['config']
if not contents:
cmd = 'show running-config all section management api http-commands'
contents = module.cli([cmd])
config = NetworkConfig(indent=3, contents=contents[0])
return config
def load_config(module, instance, commands, result):
commit = not module.check_mode
diff = module.config.load_config(commands, commit=commit)
if diff:
result['diff'] = dict(prepared=diff)
result['changed'] = True
def load(module, instance, commands, result):
candidate = NetworkConfig(indent=3)
candidate.add(commands, parents=['management api http-commands'])
config = get_config(module)
configobjs = candidate.difference(config)
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
result['updates'] = commands
load_config(module, instance, commands, result)
def clean_result(result):
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
def collect_facts(module, result):
resp = module.cli(['show management api http-commands'], output='json')
facts = dict(eos_eapi_urls=dict())
for each in resp[0]['urls']:
intf, url = each.split(' : ')
key = str(intf).strip()
if key not in facts['eos_eapi_urls']:
facts['eos_eapi_urls'][key] = list()
facts['eos_eapi_urls'][key].append(str(url).strip())
result['ansible_facts'] = facts
def main():
""" main entry point for module execution
"""
argument_spec = dict(
http=dict(aliases=['enable_http'], default=False, type='bool', setter='set_protocol_http'),
http_port=dict(default=80, type='int', setter='set_protocol_http'),
https=dict(aliases=['enable_https'], default=True, type='bool', setter='set_protocol_https'),
https_port=dict(default=443, type='int', setter='set_protocol_https'),
local_http=dict(aliases=['enable_local_http'], default=False, type='bool', setter='set_local_http'),
local_http_port=dict(default=8080, type='int', setter='set_local_http'),
socket=dict(aliases=['enable_socket'], default=False, type='bool'),
vrf=dict(default='default'),
config=dict(),
# Only allow use of transport cli when configuring eAPI
transport=dict(default='cli', choices=['cli']),
state=dict(default='started', choices=['stopped', 'started']),
)
module = NetworkModule(argument_spec=argument_spec,
connect_on_load=False,
supports_check_mode=True)
state = module.params['state']
result = dict(changed=False)
commands = list()
instance = get_instance(module)
invoke(state, module, instance, commands)
try:
load(module, instance, commands, result)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
collect_facts(module, result)
clean_result(result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| mit |
hatwar/Das_erpnext | erpnext/accounts/doctype/journal_entry/test_journal_entry.py | 25 | 7835 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest, frappe
from frappe.utils import flt
from erpnext.accounts.utils import get_actual_expense, BudgetError, get_fiscal_year
class TestJournalEntry(unittest.TestCase):
def test_journal_entry_with_against_jv(self):
jv_invoice = frappe.copy_doc(test_records[2])
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, jv_invoice)
def test_jv_against_sales_order(self):
from erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order
sales_order = make_sales_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[0])
self.jv_against_voucher_testcase(base_jv, sales_order)
def test_jv_against_purchase_order(self):
from erpnext.buying.doctype.purchase_order.test_purchase_order import create_purchase_order
purchase_order = create_purchase_order(do_not_save=True)
base_jv = frappe.copy_doc(test_records[1])
self.jv_against_voucher_testcase(base_jv, purchase_order)
def jv_against_voucher_testcase(self, base_jv, test_voucher):
dr_or_cr = "credit" if test_voucher.doctype in ["Sales Order", "Journal Entry"] else "debit"
field_dict = {'Journal Entry': "against_jv",
'Sales Order': "against_sales_order",
'Purchase Order': "against_purchase_order"
}
test_voucher.insert()
test_voucher.submit()
if test_voucher.doctype == "Journal Entry":
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where account = %s and docstatus = 1 and parent = %s""",
("_Test Receivable - _TC", test_voucher.name)))
self.assertTrue(not frappe.db.sql("""select name from `tabJournal Entry Account`
where %s=%s""" % (field_dict.get(test_voucher.doctype), '%s'), (test_voucher.name)))
base_jv.get("accounts")[0].is_advance = "Yes" if (test_voucher.doctype in ["Sales Order", "Purchase Order"]) else "No"
base_jv.get("accounts")[0].set(field_dict.get(test_voucher.doctype), test_voucher.name)
base_jv.insert()
base_jv.submit()
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where %s=%s""" % (field_dict.get(test_voucher.doctype), '%s'), (submitted_voucher.name)))
self.assertTrue(frappe.db.sql("""select name from `tabJournal Entry Account`
where %s=%s and %s=400""" % (field_dict.get(submitted_voucher.doctype), '%s', dr_or_cr), (submitted_voucher.name)))
if base_jv.get("accounts")[0].is_advance == "Yes":
self.advance_paid_testcase(base_jv, submitted_voucher, dr_or_cr)
self.cancel_against_voucher_testcase(submitted_voucher)
def advance_paid_testcase(self, base_jv, test_voucher, dr_or_cr):
#Test advance paid field
advance_paid = frappe.db.sql("""select advance_paid from `tab%s`
where name=%s""" % (test_voucher.doctype, '%s'), (test_voucher.name))
payment_against_order = base_jv.get("accounts")[0].get(dr_or_cr)
self.assertTrue(flt(advance_paid[0][0]) == flt(payment_against_order))
def cancel_against_voucher_testcase(self, test_voucher):
if test_voucher.doctype == "Journal Entry":
# if test_voucher is a Journal Entry, test cancellation of test_voucher
test_voucher.cancel()
self.assertTrue(not frappe.db.sql("""select name from `tabJournal Entry Account`
where against_jv=%s""", test_voucher.name))
elif test_voucher.doctype in ["Sales Order", "Purchase Order"]:
# if test_voucher is a Sales Order/Purchase Order, test error on cancellation of test_voucher
submitted_voucher = frappe.get_doc(test_voucher.doctype, test_voucher.name)
self.assertRaises(frappe.LinkExistsError, submitted_voucher.cancel)
def test_jv_against_stock_account(self):
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
set_perpetual_inventory()
jv = frappe.copy_doc(test_records[0])
jv.get("accounts")[0].update({
"account": "_Test Warehouse - _TC",
"party_type": None,
"party": None
})
jv.insert()
from erpnext.accounts.general_ledger import StockAccountInvalidTransaction
self.assertRaises(StockAccountInvalidTransaction, jv.submit)
set_perpetual_inventory(0)
def test_monthly_budget_crossed_ignore(self):
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
self.set_total_expense_zero("2013-02-28")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 40000, "_Test Cost Center - _TC", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv.name}))
def test_monthly_budget_crossed_stop(self):
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Stop")
self.set_total_expense_zero("2013-02-28")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 40000, "_Test Cost Center - _TC")
self.assertRaises(BudgetError, jv.submit)
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
def test_yearly_budget_crossed_stop(self):
self.test_monthly_budget_crossed_ignore()
frappe.db.set_value("Company", "_Test Company", "yearly_bgt_flag", "Stop")
self.set_total_expense_zero("2013-02-28")
jv = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 150000, "_Test Cost Center - _TC")
self.assertRaises(BudgetError, jv.submit)
frappe.db.set_value("Company", "_Test Company", "yearly_bgt_flag", "Ignore")
def test_monthly_budget_on_cancellation(self):
self.set_total_expense_zero("2013-02-28")
jv1 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 20000, "_Test Cost Center - _TC", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv1.name}))
jv2 = make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", 20000, "_Test Cost Center - _TC", submit=True)
self.assertTrue(frappe.db.get_value("GL Entry",
{"voucher_type": "Journal Entry", "voucher_no": jv2.name}))
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Stop")
self.assertRaises(BudgetError, jv1.cancel)
frappe.db.set_value("Company", "_Test Company", "monthly_bgt_flag", "Ignore")
def get_actual_expense(self, monthly_end_date):
return get_actual_expense({
"account": "_Test Account Cost for Goods Sold - _TC",
"cost_center": "_Test Cost Center - _TC",
"monthly_end_date": monthly_end_date,
"company": "_Test Company",
"fiscal_year": get_fiscal_year(monthly_end_date)[0]
})
def set_total_expense_zero(self, posting_date):
existing_expense = self.get_actual_expense(posting_date)
make_journal_entry("_Test Account Cost for Goods Sold - _TC",
"_Test Account Bank Account - _TC", -existing_expense, "_Test Cost Center - _TC", submit=True)
def make_journal_entry(account1, account2, amount, cost_center=None, submit=False):
jv = frappe.new_doc("Journal Entry")
jv.posting_date = "2013-02-14"
jv.company = "_Test Company"
jv.fiscal_year = "_Test Fiscal Year 2013"
jv.user_remark = "test"
jv.set("accounts", [
{
"account": account1,
"cost_center": cost_center,
"debit": amount if amount > 0 else 0,
"credit": abs(amount) if amount < 0 else 0,
}, {
"account": account2,
"cost_center": cost_center,
"credit": amount if amount > 0 else 0,
"debit": abs(amount) if amount < 0 else 0,
}
])
jv.insert()
if submit:
jv.submit()
return jv
test_records = frappe.get_test_records('Journal Entry')
| agpl-3.0 |
iaklampanos/dj-vercereg | dj_vercereg/dj_vercereg/settings.py | 1 | 6595 | # Copyright 2014 The University of Edinburgh
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Django settings for dj_vercereg project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vzb127q8k@vz5mqt5ct-(20ddyaklr4kuy^65!8+az0u)a!*^s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# application definition
# TEMPLATE_CONTEXT_PROCESSORS = (
# "django.contrib.auth.context_processors.auth",
# "django.core.context_processors.debug",
# "django.core.context_processors.i18n",
# "django.core.context_processors.media",
# "django.core.context_processors.static",
# "django.core.context_processors.tz",
# "django.contrib.messages.context_processors.messages",
# 'django.core.context_processors.request'
# )
# Pip package update 12/10/2018 (davve.ath)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG,
},
},
]
INSTALLED_APPS = (
# 'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'vercereg',
'rest_framework',
'rest_framework.authtoken',
'reversion',
# 'django_pdb',
'guardian',
'rest_framework_swagger',
'watson',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dj_vercereg.urls'
WSGI_APPLICATION = 'dj_vercereg.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'static'),
# )
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
#'PAGINATE_BY': 10,
}
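# With TokenAuthentication enabled above, API clients authenticate by
# sending the standard DRF token header (hypothetical token value):
#
#   Authorization: Token 9944b09199c62bcf9418ad846dd0e4bb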
# For django guardian: ########################################
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # default
'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_ID = -1
###############################################################
# Pip package update 12/10/2018 (davve.ath)
# from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
#
# TEMPLATE_CONTEXT_PROCESSORS = TCP + (
# 'django.core.context_processors.request',
# )
# Django Suit configuration example
#SUIT_CONFIG = {
# header
# 'ADMIN_NAME': 'VERCE Registry Admin',
# 'HEADER_DATE_FORMAT': 'l, j. F Y',
# 'HEADER_TIME_FORMAT': 'H:i',
# forms
# 'SHOW_REQUIRED_ASTERISK': True, # Default True
# 'CONFIRM_UNSAVED_CHANGES': True, # Default True
# menu
# 'SEARCH_URL': '/admin/auth/user/',
# 'MENU_ICONS': {
# 'sites': 'icon-leaf',
# 'auth': 'icon-lock',
# },
# 'MENU_OPEN_FIRST_CHILD': True, # Default True
# 'MENU_EXCLUDE': ('auth.group',),
# 'MENU': (
# 'sites',
# {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')},
# {'label': 'Settings', 'icon':'icon-cog', 'models': ('auth.user', 'auth.group')},
# {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'},
# ),
# misc
# 'LIST_PER_PAGE': 15
#}
SWAGGER_SETTINGS = {
"exclude_namespaces": [], # List URL namespaces to ignore
"api_version": '0.1', # Specify your API's version
"api_path": "/", # Specify the path to your API not a root level
"enabled_methods": [ # Specify which methods to enable in Swagger UI
'get',
'post',
'put'
],
# "api_key": '4737f71829a7eff1e077268b89696ab536c26a11', # An API key
"is_authenticated": True, # Set to True to enforce user authentication,
"is_superuser": False, # Set to True to enforce admin only access
"permission_denied_handler": None, # If user has no permisssion, raise 403 error
"info": {
# Configure some Swagger UI variables, for details see:
# https://github.com/swagger-api/swagger-spec/blob/master/versions/1.2.md#513-info-object
'contact': '[email protected]',
'description': '',
'license': 'Apache 2.0',
'licenseUrl': 'http://www.apache.org/licenses/LICENSE-2.0.html',
'termsOfServiceUrl': '',
'title': 'VERCE dispel4py Registry',
},
}
try:
from local_settings import *
except ImportError:
pass
| apache-2.0 |
jstammers/EDMSuite | NavPython/IronPython/Lib/pprint.py | 147 | 11932 | # Author: Fred L. Drake, Jr.
# [email protected]
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
import warnings
from cStringIO import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=1, width=80, depth=None):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth)
printer.pprint(object)
def pformat(object, indent=1, width=80, depth=None):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
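# Example: a self-referential list is recursive and not readable, and
# saferepr() renders it without infinite recursion:
#
#   >>> l = []; l.append(l)
#   >>> isrecursive(l), isreadable(l)
#   (True, False)
#   >>> saferepr(l)  # the id value varies per run
#   '[<Recursion on list with id=...>]'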
def _sorted(iterable):
with warnings.catch_warnings():
if _sys.py3kwarning:
warnings.filterwarnings("ignore", "comparing unequal types "
"not supported", DeprecationWarning)
return sorted(iterable)
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0, "indent must be >= 0"
assert depth is None or depth > 0, "depth must be > 0"
assert width, "width must be != 0"
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
level = level + 1
objid = _id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level - 1)
typ = _type(object)
sepLines = _len(rep) > (self._width - 1 - indent - allowance)
write = stream.write
if self._depth and level > self._depth:
write(rep)
return
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self._indent_per_level
items = _sorted(object.items())
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
if sepLines:
write(',\n%s%s: ' % (' '*indent, rep))
else:
write(', %s: ' % rep)
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
write('}')
return
if ((issubclass(typ, list) and r is list.__repr__) or
(issubclass(typ, tuple) and r is tuple.__repr__) or
(issubclass(typ, set) and r is set.__repr__) or
(issubclass(typ, frozenset) and r is frozenset.__repr__)
):
length = _len(object)
if issubclass(typ, list):
write('[')
endchar = ']'
elif issubclass(typ, set):
if not length:
write('set()')
return
write('set([')
endchar = '])'
object = _sorted(object)
indent += 4
elif issubclass(typ, frozenset):
if not length:
write('frozenset()')
return
write('frozenset([')
endchar = '])'
object = _sorted(object)
indent += 10
else:
write('(')
endchar = ')'
if self._indent_per_level > 1 and sepLines:
write((self._indent_per_level - 1) * ' ')
if length:
context[objid] = 1
indent = indent + self._indent_per_level
self._format(object[0], stream, indent, allowance + 1,
context, level)
if length > 1:
for ent in object[1:]:
if sepLines:
write(',\n' + ' '*indent)
else:
write(', ')
self._format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
if issubclass(typ, tuple) and length == 1:
write(',')
write(endchar)
return
write(rep)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = _type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = _id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in _sorted(object.items()):
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % _commajoin(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = _id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % _commajoin(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (_type(object).__name__, _id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print "_safe_repr:", t2 - t1
print "pformat:", t3 - t2
if __name__ == "__main__":
_perfcheck()
| mit |
m4nolo/steering-all | src/entities/relativeentity.py | 2 | 1279 | from entity import Entity
class RelativeEntity(Entity):
def __init__(self, width, height):
Entity.__init__(self, width, height)
self.margin = [0, 0, 0, 0]
def below(self, entity):
self.y = entity.y + entity.height + self.margin[1]
def above(self, entity):
self.y = entity.y - self.height - self.margin[3]
def leftOf(self, entity):
self.x = entity.x - self.width - self.margin[2]
def rightOf(self, entity):
self.x = entity.x + entity.width + self.margin[0]
def setMargin(self, margin):
# named setMargin: the 'margin' list attribute assigned in __init__
# shadows any instance method called 'margin'
self.margin = margin
def marginLeft(self, margin):
self.margin[0] = margin
def marginRight(self, margin):
self.margin[2] = margin
def marginTop(self, margin):
self.margin[1] = margin
def marginBottom(self, margin):
self.margin[3] = margin
def alignLeft(self):
self.x = 0 + self.margin[0]
def alignRight(self, width):
self.x = width - self.width - self.margin[2]
def alignTop(self):
self.y = 0 + self.margin[1]
def alignBottom(self, height):
self.y = height - self.height - self.margin[3]
def centerRelativeX(self, entity):
self.x = entity.x + (entity.width / 2) - (self.width / 2)
def centerRelativeY(self, entity):
self.y = entity.y + (entity.height / 2) - (self.height / 2)
| mit |
etkirsch/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
petercable/mi-instrument | mi/platform/rsn/simulator/oms_events.py | 5 | 10097 | #!/usr/bin/env python
"""
@package ion.agents.platform.rsn.simulator.oms_events
@file ion/agents/platform/rsn/simulator/oms_events.py
@author Carlos Rueda
@brief OMS simulator event definitions and supporting functions.
Demo program included that allows to run both a listener server and a
notifier. See demo program usage at the end of this file.
"""
__author__ = 'Carlos Rueda'
__license__ = 'Apache 2.0'
import sys
from time import sleep
import time
import ntplib
from urlparse import urlparse
import httplib
import yaml
import json
from ooi.logging import log
##########################################################################
# The "event type" concept was removed from the interface (~Apr/2013).
# To minimize changes in the code, simply introduce an 'ALL' event type here.
class EventInfo(object):
EVENT_TYPES = {
'ALL': {
'name': 'on battery',
'severity': 3,
'group': 'power',
}
}
class EventNotifier(object):
def __init__(self):
# _listeners: { event_type: {url: reg_time, ...}, ... }
# initialize with empty dict for each event type:
self._listeners = dict((et, {}) for et in EventInfo.EVENT_TYPES)
def add_listener(self, url, event_type):
assert event_type in EventInfo.EVENT_TYPES
url_dict = self._listeners[event_type]
if url not in url_dict:
url_dict[url] = ntplib.system_to_ntp_time(time.time())
log.trace("added listener=%s for event_type=%s", url, event_type)
return url_dict[url]
def remove_listener(self, url, event_type):
assert event_type in EventInfo.EVENT_TYPES
url_dict = self._listeners[event_type]
unreg_time = 0
if url in url_dict:
unreg_time = ntplib.system_to_ntp_time(time.time())
del url_dict[url]
log.trace("removed listener=%s for event_type=%s", url, event_type)
return unreg_time
def notify(self, event_instance):
"""
Notifies the event to all associated listeners.
"""
assert isinstance(event_instance, dict)
urls = self._listeners['ALL']
if not len(urls):
# no event listeners for event_type; just ignore notification:
return
# copy list to get a snapshot of the current dictionary and thus avoid
# concurrent modification kind of runtime errors like:
# RuntimeError: dictionary changed size during iteration
urls = list(urls)
for url in urls:
self._notify_listener(url, event_instance)
def _notify_listener(self, url, event_instance):
"""
Notifies event to given listener.
"""
if url == "http://NO_OMS_NOTIFICATIONS": # pragma: no cover
# developer convenience -see ion.agents.platform.rsn.oms_event_listener
return
log.debug("Notifying event_instance=%s to listener=%s", str(event_instance), url)
# include url in event instance for diagnostic/debugging purposes:
event_instance['listener_url'] = url
# prepare payload (JSON format):
payload = json.dumps(event_instance, indent=2)
log.trace("payload=\n%s", payload)
headers = {
"Content-type": "application/json",
"Accept": "text/plain"
}
conn = None
try:
o = urlparse(url)
url4conn = o.netloc
path = o.path
conn = httplib.HTTPConnection(url4conn)
conn.request("POST", path, body=payload, headers=headers)
response = conn.getresponse()
data = response.read()
log.trace("RESPONSE: %s, %s, %s", response.status, response.reason, data)
except Exception as e:
# the actual listener is no longer there; just log a message
log.warn("event notification HTTP request failed: %r: %s", url, e)
finally:
if conn:
conn.close()
class EventGenerator(object):
"""
Simple helper to generate and trigger event notifications.
"""
def __init__(self, notifier):
self._notifier = notifier
self._keep_running = True
self._index = 0 # in EventInfo.EVENT_TYPES
# self._runnable set depending on whether we're under pyon or not
if 'pyon' in sys.modules:
from gevent import Greenlet
self._runnable = Greenlet(self._run)
log.debug("!!!! EventGenerator: pyon detected: using Greenlet")
else:
from threading import Thread
self._runnable = Thread(target=self._run)
self._runnable.setDaemon(True)
log.debug("!!!! EventGenerator: pyon not detected: using Thread")
def generate_and_notify_event(self):
if self._index >= len(EventInfo.EVENT_TYPES):
self._index = 0
event_type = EventInfo.EVENT_TYPES.values()[self._index]
self._index += 1
platform_id = "TODO_some_platform_id"
message = "%s (synthetic event generated from simulator)" % event_type['name']
group = event_type['group']
timestamp = ntplib.system_to_ntp_time(time.time())
first_time_timestamp = timestamp
severity = event_type['severity']
event_instance = {
'message': message,
'platform_id': platform_id,
'timestamp': timestamp,
'first_time_timestamp': first_time_timestamp,
'severity': severity,
'group': group,
}
log.debug("notifying event_instance=%s", str(event_instance))
self._notifier.notify(event_instance)
def start(self):
self._runnable.start()
def _run(self):
sleep(3) # wait a bit before first event
while self._keep_running:
self.generate_and_notify_event()
# sleep for a few secs regularly checking we still are running
secs = 7
while self._keep_running and secs > 0:
sleep(0.3)
secs -= 0.3
log.trace("event generation stopped.")
def stop(self):
log.trace("stopping event generation...")
self._keep_running = False
if __name__ == "__main__": # pragma: no cover
#
# first, call this demo program with command line argument 'listener',
# then, on a second terminal, with argument 'notifier'
#
host, port = "localhost", 8000
import sys
if len(sys.argv) > 1 and sys.argv[1] == "listener":
# run listener
from gevent.pywsgi import WSGIServer
def application(environ, start_response):
#print('listener got environ=%s' % str(environ))
print(" ".join(('%s=%s' % (k, environ[k])) for k in [
'CONTENT_LENGTH','CONTENT_TYPE', 'HTTP_ACCEPT']))
input = environ['wsgi.input']
body = "".join(input.readlines())
print('body=\n%s' % body)
#
# note: the expected content format is JSON and we can in general
# parse with either json or yaml ("every JSON file is also a valid
# YAML file" -- http://yaml.org/spec/1.2/spec.html#id2759572):
#
event_instance = yaml.load(body)
print('event_instance=%s' % str(event_instance))
# respond OK:
headers = [('Content-Type', 'text/plain') ]
status = '200 OK'
start_response(status, headers)
return "MY-RESPONSE. BYE"
print("%s:%s: listening for event notifications..." % (host, port))
WSGIServer((host, port), application).serve_forever()
elif len(sys.argv) > 1 and sys.argv[1] == "notifier":
# run notifier
notifier = EventNotifier()
url = "http://%s:%s" % (host, port)
for event_type in EventInfo.EVENT_TYPES.keys():
notifier.add_listener(url, event_type)
print("registered listener to event_type=%r" % event_type)
generator = EventGenerator(notifier)
secs = 15
print("generating events for %s seconds ..." % secs)
generator.start()
sleep(secs)
generator.stop()
else:
print("usage: call me with arg 'listener' or 'notifier'")
"""
Test program
TERMINAL 1:
$ bin/python ion/agents/platform/rsn/simulator/oms_events.py listener
localhost:8000: listening for event notifications...
TERMINAL 2:
oms_simulator: setting log level to: logging.WARN
registered listener to event_type='ALL'
generating events for 15 seconds ...
TERMINAL 1:
CONTENT_LENGTH=270 CONTENT_TYPE=application/json HTTP_ACCEPT=text/plain
body=
{
"group": "power",
"severity": 3,
"url": "http://localhost:8000",
"timestamp": 3578265811.422655,
"platform_id": "TODO_some_platform_id",
"message": "on battery (synthetic event generated from simulator)",
"first_time_timestamp": 3578265811.422655
}
event_instance={'platform_id': 'TODO_some_platform_id', 'group': 'power', 'severity': 3, 'url': 'http://localhost:8000', 'timestamp': 3578265811.422655, 'message': 'on battery (synthetic event generated from simulator)', 'first_time_timestamp': 3578265811.422655}
127.0.0.1 - - [2013-05-22 19:43:31] "POST / HTTP/1.1" 200 118 0.002814
CONTENT_LENGTH=270 CONTENT_TYPE=application/json HTTP_ACCEPT=text/plain
body=
{
"group": "power",
"severity": 3,
"url": "http://localhost:8000",
"timestamp": 3578265818.647295,
"platform_id": "TODO_some_platform_id",
"message": "on battery (synthetic event generated from simulator)",
"first_time_timestamp": 3578265818.647295
}
event_instance={'platform_id': 'TODO_some_platform_id', 'group': 'power', 'severity': 3, 'url': 'http://localhost:8000', 'timestamp': 3578265818.647295, 'message': 'on battery (synthetic event generated from simulator)', 'first_time_timestamp': 3578265818.647295}
127.0.0.1 - - [2013-05-22 19:43:38] "POST / HTTP/1.1" 200 118 0.003455
"""
| bsd-2-clause |
neurokernel/retina | retina/screen/map/pointmap.py | 1 | 2303 | from abc import ABCMeta, abstractmethod
class PointMap(object):
""" Interface of mapping a point from one surface to another
(hence the 2 parameters)
"""
__metaclass__ = ABCMeta
@abstractmethod
def map(self, p1, p2):
""" map of point (p1, p2) from one surface to another """
return
@abstractmethod
def invmap(self, p1, p2):
""" inverse map of point (p1, p2) from one surface to another """
return
class PointMapWithDirection(object):
""" Interface of mapping a point from one surface to another
(hence the 2 parameters)
"""
__metaclass__ = ABCMeta
@abstractmethod
def map(self, p1, p2, dp1, dp2):
""" map of point (p1, p2) from one surface to another
in direction (dp1, dp2) """
return
@abstractmethod
def invmap(self, p1, p2, dp1, dp2):
""" inverse map of point (p1, p2) from one surface to another
in direction (dp1, dp2) """
return
class EyeToImagePointMap(PointMap):
""" Encapsulates 2 PointMap transformations to map a point
from the fly's eye to a screen and then to an image
"""
__metaclass__ = ABCMeta
@abstractmethod
def map_screen_to_image(self, p1, p2):
mapscreen = self._map_screen
r1, r2 = mapscreen.map(p1, p2)
return (r1, r2)
@abstractmethod
def invmap_screen_to_image(self, p1, p2):
mapscreen = self._map_screen
r1, r2 = mapscreen.invmap(p1, p2)
return (r1, r2)
@abstractmethod
def map_eye_to_screen(self, p1, p2):
mapeye = self._map_eye
r1, r2 = mapeye.map(p1, p2)
return (r1, r2)
@abstractmethod
def invmap_eye_to_screen(self, p1, p2):
mapeye = self._map_eye
r1, r2 = mapeye.invmap(p1, p2)
return (r1, r2)
# implementation of superclass abstract classes
def map(self, p1, p2):
mapeye = self._map_eye
mapscreen = self._map_screen
t1, t2 = mapeye.map(p1, p2)
r1, r2 = mapscreen.map(t1, t2)
return (r1, r2)
def invmap(self, p1, p2):
mapeye = self._map_eye
mapscreen = self._map_screen
t1, t2 = mapscreen.invmap(p1, p2)
r1, r2 = mapeye.invmap(t1, t2)
return (r1, r2)
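# A minimal concrete sketch (hypothetical): the identity transform is
# the simplest possible PointMap.
#
# class IdentityMap(PointMap):
#     def map(self, p1, p2):
#         return (p1, p2)
#     def invmap(self, p1, p2):
#         return (p1, p2)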
| bsd-3-clause |
initialed85/mac_os_scripts | mac_os_scripts/set_firmware_password.py | 1 | 1788 | """
This script is responsible for setting the firmware password
Commands used:
- expect -d -f /usr/local/zetta/mac_os_scripts/external/set_firmware_password.expect
set password [lindex $argv 0];
spawn firmwarepasswd -setpasswd -setmode command
expect {
"Enter new password:" {
send "$password\r"
exp_continue
}
"Re-enter new password:" {
send "$password\r"
exp_continue
}
}
"""
from common import CLITieIn
class FirmwarePasswordSetter(CLITieIn):
def set_firmware_password(self, password):
command = '/usr/bin/expect -d -f /usr/local/zetta/mac_os_scripts/external/set_firmware_password.expect \'{0}\''.format(
password)
command_output = self.command(command)
if command_output.error_level != 0:
self._logger.error(
'{0} failed stating {1}'.format(
command, command_output
)
)
return False
return True
def run(self, password):
if not self.set_firmware_password(password):
self._logger.error('failed set_firmware_password; cannot continue')
return False
self._logger.debug('passed')
return True
if __name__ == '__main__':
from utils import get_argparser, get_args
parser = get_argparser()
parser.add_argument(
'-f',
'--firmware-password',
type=str,
required=True,
help='firmware password to set'
)
args = get_args(parser)
actor = FirmwarePasswordSetter(
sudo_password=args.sudo_password,
)
result = actor.run(
password=args.firmware_password
)
if not result:
exit(1)
exit(0)
| mit |
KerkhoffTechnologies/django-connectwise | djconnectwise/migrations/0107_auto_20190729_1352.py | 1 | 1172 | # Generated by Django 2.1 on 2019-07-29 13:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('djconnectwise', '0106_auto_20190722_1524'),
]
operations = [
migrations.AddField(
model_name='connectwiseboard',
name='bill_time',
field=models.CharField(blank=True, choices=[('Billable', 'Billable'), ('DoNotBill', 'Do Not Bill'), ('NoCharge', 'No Charge')], max_length=50, null=True),
),
migrations.AddField(
model_name='member',
name='work_role',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='djconnectwise.WorkRole'),
),
migrations.AddField(
model_name='member',
name='work_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='djconnectwise.WorkType'),
),
migrations.AddField(
model_name='worktype',
name='overall_default_flag',
field=models.BooleanField(default=False),
),
]
| mit |
a11r/grpc | src/python/grpcio_health_checking/grpc_health/v1/health.py | 15 | 2677 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Reference implementation for health checking in gRPC Python."""
import threading
import grpc
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc
class HealthServicer(health_pb2_grpc.HealthServicer):
"""Servicer handling RPCs for service statuses."""
def __init__(self):
self._server_status_lock = threading.Lock()
self._server_status = {}
def Check(self, request, context):
with self._server_status_lock:
status = self._server_status.get(request.service)
if status is None:
context.set_code(grpc.StatusCode.NOT_FOUND)
return health_pb2.HealthCheckResponse()
else:
return health_pb2.HealthCheckResponse(status=status)
def set(self, service, status):
"""Sets the status of a service.
Args:
service: string, the name of the service.
NOTE: the empty service name '' must be set; it denotes overall server status.
status: HealthCheckResponse.status enum value indicating
the status of the service
"""
with self._server_status_lock:
self._server_status[service] = status
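# A minimal usage sketch, assuming the conventional grpc.server setup; the
# port and lifetime handling below are illustrative, not part of this module.
if __name__ == '__main__':
    import time
    from concurrent import futures

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    servicer = HealthServicer()
    health_pb2_grpc.add_HealthServicer_to_server(servicer, server)
    # The empty service name conventionally reports overall server status.
    servicer.set('', health_pb2.HealthCheckResponse.SERVING)
    server.add_insecure_port('[::]:50051')
    server.start()
    time.sleep(60)
    server.stop(0)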
| bsd-3-clause |
MjAbuz/watchdog | vendor/rdflib-2.4.0/rdflib/sparql/bison/SolutionModifier.py | 4 | 1223 | ASCENDING_ORDER = 1
DESCENDING_ORDER = 2
UNSPECIFIED_ORDER = 3
ORDER_VALUE_MAPPING = {
ASCENDING_ORDER : 'Ascending',
DESCENDING_ORDER : 'Descending',
UNSPECIFIED_ORDER : 'Default',
}
class SolutionModifier(object):
def __init__(self,orderClause=None,limitClause=None,offsetClause=None):
self.orderClause = orderClause
self.limitClause = limitClause
self.offsetClause = offsetClause
def __repr__(self):
if not(self.orderClause or self.limitClause or self.offsetClause):
return ""
return "<SoutionModifier:%s%s%s>"%(
self.orderClause and ' ORDER BY %s'%self.orderClause or '',
self.limitClause and ' LIMIT %s'%self.limitClause or '',
self.offsetClause and ' OFFSET %s'%self.offsetClause or '')
class ParsedOrderConditionExpression(object):
"""
A list of OrderConditions
OrderCondition ::= (('ASC'|'DESC')BrackettedExpression )|(FunctionCall|Var|BrackettedExpression)
"""
def __init__(self,expression,order):
self.expression = expression
self.order = order
def __repr__(self):
return "%s(%s)"%(ORDER_VALUE_MAPPING[self.order],self.expression.reduce())
| agpl-3.0 |
h0nIg/ansible-modules-extras | cloud/google/gce_img.py | 23 | 5760 | #!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""An Ansible module to utilize GCE image resources."""
DOCUMENTATION = '''
---
module: gce_img
version_added: "1.9"
short_description: utilize GCE image resources
description:
- This module can create and delete GCE private images from a gzip-compressed
tarball containing raw disk data or from existing detached
disks in any zone. U(https://cloud.google.com/compute/docs/images)
options:
name:
description:
- the name of the image to create or delete
required: true
default: null
description:
description:
- an optional description
required: false
default: null
source:
description:
- the source disk or the Google Cloud Storage URI to create the image from
required: false
default: null
state:
description:
- desired state of the image
required: false
default: "present"
choices: ["present", "absent"]
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
timeout:
description:
- timeout for the operation
required: false
default: 180
version_added: "2.0"
service_account_email:
description:
- service account email
required: false
default: null
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
project_id:
description:
- your GCE project ID
required: false
default: null
requirements:
- "python >= 2.6"
- "apache-libcloud"
author: "Peter Tan (@tanpeter)"
'''
EXAMPLES = '''
# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
- gce_img:
name: test-image
source: test-disk
zone: us-central1-a
state: present
# Create an image named test-image from a tarball in Google Cloud Storage.
- gce_img:
name: test-image
source: https://storage.googleapis.com/bucket/path/to/image.tgz
# Alternatively use the gs scheme
- gce_img:
name: test-image
source: gs://bucket/path/to/image.tgz
# Delete an image named test-image.
- gce_img:
name: test-image
state: absent
'''
import sys
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError
from libcloud.common.google import ResourceExistsError
from libcloud.common.google import ResourceNotFoundError
_ = Provider.GCE
has_libcloud = True
except ImportError:
has_libcloud = False
GCS_URI = 'https://storage.googleapis.com/'
def create_image(gce, name, module):
"""Create an image with the specified name."""
source = module.params.get('source')
zone = module.params.get('zone')
desc = module.params.get('description')
timeout = module.params.get('timeout')
if not source:
module.fail_json(msg='Must supply a source', changed=False)
if source.startswith(GCS_URI):
# source is a Google Cloud Storage URI
volume = source
elif source.startswith('gs://'):
# libcloud only accepts https URI.
volume = source.replace('gs://', GCS_URI)
else:
try:
volume = gce.ex_get_volume(source, zone)
except ResourceNotFoundError:
module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
changed=False)
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
old_timeout = gce.connection.timeout
try:
gce.connection.timeout = timeout
gce.ex_create_image(name, volume, desc, False)
return True
except ResourceExistsError:
return False
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
finally:
gce.connection.timeout = old_timeout
def delete_image(gce, name, module):
"""Delete a specific image resource by name."""
try:
gce.ex_delete_image(name)
return True
except ResourceNotFoundError:
return False
except GoogleBaseError, e:
module.fail_json(msg=str(e), changed=False)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
description=dict(),
source=dict(),
state=dict(default='present', choices=['present', 'absent']),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
pem_file=dict(),
project_id=dict(),
timeout=dict(type='int', default=180)
)
)
if not has_libcloud:
module.fail_json(msg='libcloud with GCE support is required.')
gce = gce_connect(module)
name = module.params.get('name')
state = module.params.get('state')
changed = False
# user wants to create an image.
if state == 'present':
changed = create_image(gce, name, module)
# user wants to delete the image.
if state == 'absent':
changed = delete_image(gce, name, module)
module.exit_json(changed=changed, name=name)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main()
| gpl-3.0 |
dd00/commandergenius | project/jni/python/src/Lib/test/test_richcmp.py | 55 | 11262 | # Tests for rich comparisons
import unittest
from test import test_support
import operator
class Number:
def __init__(self, x):
self.x = x
def __lt__(self, other):
return self.x < other
def __le__(self, other):
return self.x <= other
def __eq__(self, other):
return self.x == other
def __ne__(self, other):
return self.x != other
def __gt__(self, other):
return self.x > other
def __ge__(self, other):
return self.x >= other
def __cmp__(self, other):
raise test_support.TestFailed, "Number.__cmp__() should not be called"
def __repr__(self):
return "Number(%r)" % (self.x, )
class Vector:
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]
def __setitem__(self, i, v):
self.data[i] = v
__hash__ = None # Vectors cannot be hashed
def __nonzero__(self):
raise TypeError, "Vectors cannot be used in Boolean contexts"
def __cmp__(self, other):
raise test_support.TestFailed, "Vector.__cmp__() should not be called"
def __repr__(self):
return "Vector(%r)" % (self.data, )
def __lt__(self, other):
return Vector([a < b for a, b in zip(self.data, self.__cast(other))])
def __le__(self, other):
return Vector([a <= b for a, b in zip(self.data, self.__cast(other))])
def __eq__(self, other):
return Vector([a == b for a, b in zip(self.data, self.__cast(other))])
def __ne__(self, other):
return Vector([a != b for a, b in zip(self.data, self.__cast(other))])
def __gt__(self, other):
return Vector([a > b for a, b in zip(self.data, self.__cast(other))])
def __ge__(self, other):
return Vector([a >= b for a, b in zip(self.data, self.__cast(other))])
def __cast(self, other):
if isinstance(other, Vector):
other = other.data
if len(self.data) != len(other):
raise ValueError, "Cannot compare vectors of different length"
return other
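# A small illustration of the itemwise semantics exercised below (values
# assumed): comparing two equal-length vectors yields a Vector of bools,
#
#   Vector([1, 2]) < Vector([2, 2])  ->  Vector([True, False])
#
# which is why bool() (and hence 'not') on a comparison result raises
# TypeError instead of collapsing to a single truth value.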
opmap = {
"lt": (lambda a,b: a< b, operator.lt, operator.__lt__),
"le": (lambda a,b: a<=b, operator.le, operator.__le__),
"eq": (lambda a,b: a==b, operator.eq, operator.__eq__),
"ne": (lambda a,b: a!=b, operator.ne, operator.__ne__),
"gt": (lambda a,b: a> b, operator.gt, operator.__gt__),
"ge": (lambda a,b: a>=b, operator.ge, operator.__ge__)
}
class VectorTest(unittest.TestCase):
def checkfail(self, error, opname, *args):
for op in opmap[opname]:
self.assertRaises(error, op, *args)
def checkequal(self, opname, a, b, expres):
for op in opmap[opname]:
realres = op(a, b)
# can't use assertEqual(realres, expres) here
self.assertEqual(len(realres), len(expres))
for i in xrange(len(realres)):
# results are bool, so we can use "is" here
self.assert_(realres[i] is expres[i])
def test_mixed(self):
# check that comparisons involving Vector objects
# which return rich results (i.e. Vectors with itemwise
# comparison results) work
a = Vector(range(2))
b = Vector(range(3))
# all comparisons should fail for different length
for opname in opmap:
self.checkfail(ValueError, opname, a, b)
a = range(5)
b = 5 * [2]
# try mixed arguments (but not (a, b) as that won't return a bool vector)
args = [(a, Vector(b)), (Vector(a), b), (Vector(a), Vector(b))]
for (a, b) in args:
self.checkequal("lt", a, b, [True, True, False, False, False])
self.checkequal("le", a, b, [True, True, True, False, False])
self.checkequal("eq", a, b, [False, False, True, False, False])
self.checkequal("ne", a, b, [True, True, False, True, True ])
self.checkequal("gt", a, b, [False, False, False, True, True ])
self.checkequal("ge", a, b, [False, False, True, True, True ])
for ops in opmap.itervalues():
for op in ops:
# calls __nonzero__, which should fail
self.assertRaises(TypeError, bool, op(a, b))
class NumberTest(unittest.TestCase):
def test_basic(self):
# Check that comparisons involving Number objects
# give the same results give as comparing the
# corresponding ints
for a in xrange(3):
for b in xrange(3):
for typea in (int, Number):
for typeb in (int, Number):
if typea==typeb==int:
continue # the combination int, int is useless
ta = typea(a)
tb = typeb(b)
for ops in opmap.itervalues():
for op in ops:
realoutcome = op(a, b)
testoutcome = op(ta, tb)
self.assertEqual(realoutcome, testoutcome)
def checkvalue(self, opname, a, b, expres):
for typea in (int, Number):
for typeb in (int, Number):
ta = typea(a)
tb = typeb(b)
for op in opmap[opname]:
realres = op(ta, tb)
realres = getattr(realres, "x", realres)
self.assert_(realres is expres)
def test_values(self):
# check all operators and all comparison results
self.checkvalue("lt", 0, 0, False)
self.checkvalue("le", 0, 0, True )
self.checkvalue("eq", 0, 0, True )
self.checkvalue("ne", 0, 0, False)
self.checkvalue("gt", 0, 0, False)
self.checkvalue("ge", 0, 0, True )
self.checkvalue("lt", 0, 1, True )
self.checkvalue("le", 0, 1, True )
self.checkvalue("eq", 0, 1, False)
self.checkvalue("ne", 0, 1, True )
self.checkvalue("gt", 0, 1, False)
self.checkvalue("ge", 0, 1, False)
self.checkvalue("lt", 1, 0, False)
self.checkvalue("le", 1, 0, False)
self.checkvalue("eq", 1, 0, False)
self.checkvalue("ne", 1, 0, True )
self.checkvalue("gt", 1, 0, True )
self.checkvalue("ge", 1, 0, True )
class MiscTest(unittest.TestCase):
def test_misbehavin(self):
class Misb:
def __lt__(self, other): return 0
def __gt__(self, other): return 0
def __eq__(self, other): return 0
def __le__(self, other): raise test_support.TestFailed, "This shouldn't happen"
def __ge__(self, other): raise test_support.TestFailed, "This shouldn't happen"
def __ne__(self, other): raise test_support.TestFailed, "This shouldn't happen"
def __cmp__(self, other): raise RuntimeError, "expected"
a = Misb()
b = Misb()
self.assertEqual(a<b, 0)
self.assertEqual(a==b, 0)
self.assertEqual(a>b, 0)
self.assertRaises(RuntimeError, cmp, a, b)
def test_not(self):
# Check that exceptions in __nonzero__ are properly
# propagated by the not operator
import operator
class Exc(Exception):
pass
class Bad:
def __nonzero__(self):
raise Exc
def do(bad):
not bad
for func in (do, operator.not_):
self.assertRaises(Exc, func, Bad())
def test_recursion(self):
# Check that comparison for recursive objects fails gracefully
from UserList import UserList
a = UserList()
b = UserList()
a.append(b)
b.append(a)
self.assertRaises(RuntimeError, operator.eq, a, b)
self.assertRaises(RuntimeError, operator.ne, a, b)
self.assertRaises(RuntimeError, operator.lt, a, b)
self.assertRaises(RuntimeError, operator.le, a, b)
self.assertRaises(RuntimeError, operator.gt, a, b)
self.assertRaises(RuntimeError, operator.ge, a, b)
b.append(17)
# Even recursive lists of different lengths are different,
# but they cannot be ordered
self.assert_(not (a == b))
self.assert_(a != b)
self.assertRaises(RuntimeError, operator.lt, a, b)
self.assertRaises(RuntimeError, operator.le, a, b)
self.assertRaises(RuntimeError, operator.gt, a, b)
self.assertRaises(RuntimeError, operator.ge, a, b)
a.append(17)
self.assertRaises(RuntimeError, operator.eq, a, b)
self.assertRaises(RuntimeError, operator.ne, a, b)
a.insert(0, 11)
b.insert(0, 12)
self.assert_(not (a == b))
self.assert_(a != b)
self.assert_(a < b)
class DictTest(unittest.TestCase):
def test_dicts(self):
# Verify that __eq__ and __ne__ work for dicts even if the keys and
# values don't support anything other than __eq__ and __ne__ (and
# __hash__). Complex numbers are a fine example of that.
import random
imag1a = {}
for i in range(50):
imag1a[random.randrange(100)*1j] = random.randrange(100)*1j
items = imag1a.items()
random.shuffle(items)
imag1b = {}
for k, v in items:
imag1b[k] = v
imag2 = imag1b.copy()
imag2[k] = v + 1.0
self.assert_(imag1a == imag1a)
self.assert_(imag1a == imag1b)
self.assert_(imag2 == imag2)
self.assert_(imag1a != imag2)
for opname in ("lt", "le", "gt", "ge"):
for op in opmap[opname]:
self.assertRaises(TypeError, op, imag1a, imag2)
class ListTest(unittest.TestCase):
def assertIs(self, a, b):
self.assert_(a is b)
def test_coverage(self):
# exercise all comparisons for lists
x = [42]
self.assertIs(x<x, False)
self.assertIs(x<=x, True)
self.assertIs(x==x, True)
self.assertIs(x!=x, False)
self.assertIs(x>x, False)
self.assertIs(x>=x, True)
y = [42, 42]
self.assertIs(x<y, True)
self.assertIs(x<=y, True)
self.assertIs(x==y, False)
self.assertIs(x!=y, True)
self.assertIs(x>y, False)
self.assertIs(x>=y, False)
def test_badentry(self):
# make sure that exceptions for item comparison are properly
# propagated in list comparisons
class Exc(Exception):
pass
class Bad:
def __eq__(self, other):
raise Exc
x = [Bad()]
y = [Bad()]
for op in opmap["eq"]:
self.assertRaises(Exc, op, x, y)
def test_goodentry(self):
# This test exercises the final call to PyObject_RichCompare()
# in Objects/listobject.c::list_richcompare()
class Good:
def __lt__(self, other):
return True
x = [Good()]
y = [Good()]
for op in opmap["lt"]:
self.assertIs(op(x, y), True)
def test_main():
test_support.run_unittest(VectorTest, NumberTest, MiscTest, DictTest, ListTest)
if __name__ == "__main__":
test_main()
| lgpl-2.1 |
madj4ck/ansible | lib/ansible/new_inventory/host.py | 236 | 1551 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class Host:
def __init__(self, name):
self._name = name
self._connection = None
self._ipv4_address = ''
self._ipv6_address = ''
self._port = 22
self._vars = dict()
def __repr__(self):
return self.get_name()
def get_name(self):
return self._name
def get_groups(self):
return []
def set_variable(self, name, value):
''' sets a variable for this host '''
self._vars[name] = value
def get_vars(self):
''' returns all variables for this host '''
all_vars = self._vars.copy()
all_vars.update(dict(inventory_hostname=self._name))
return all_vars
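# A minimal usage sketch (host name and variable below are illustrative):
#
#   h = Host('web1.example.com')
#   h.set_variable('ansible_user', 'deploy')
#   h.get_vars()
#   # -> {'ansible_user': 'deploy', 'inventory_hostname': 'web1.example.com'}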
| gpl-3.0 |
google-research/lasertagger | run_lasertagger_test.py | 3 | 1196 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import run_lasertagger
import tensorflow as tf
class RunLasertaggerTest(tf.test.TestCase):
def test_step_calculation(self):
num_examples = 10
batch_size = 2
num_epochs = 3
warmup_proportion = 0.5
steps, warmup_steps = run_lasertagger._calculate_steps(
num_examples, batch_size, num_epochs, warmup_proportion)
self.assertEqual(steps, 15)
self.assertEqual(warmup_steps, 7)
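# The expected values follow from the step arithmetic in
# run_lasertagger._calculate_steps (a sketch, assuming the usual formulas):
#   steps        = int(num_examples / batch_size * num_epochs)
#                = int(10 / 2 * 3) = 15
#   warmup_steps = int(steps * warmup_proportion) = int(15 * 0.5) = 7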
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
msdx321/android_kernel_samsung_heroXqltechn | lazy-prebuilt/aarch64-linux-android-4.9/share/gdb/python/gdb/command/explore.py | 126 | 26824 | # GDB 'explore' command.
# Copyright (C) 2012-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Implementation of the GDB 'explore' command using the GDB Python API."""
import gdb
import sys
if sys.version_info[0] > 2:
# Python 3 renamed raw_input to input
raw_input = input
class Explorer(object):
"""Internal class which invokes other explorers."""
# This map is filled by the Explorer.init_env() function
type_code_to_explorer_map = { }
_SCALAR_TYPE_LIST = (
gdb.TYPE_CODE_CHAR,
gdb.TYPE_CODE_INT,
gdb.TYPE_CODE_BOOL,
gdb.TYPE_CODE_FLT,
gdb.TYPE_CODE_VOID,
gdb.TYPE_CODE_ENUM,
)
@staticmethod
def guard_expr(expr):
length = len(expr)
guard = False
if expr[0] == '(' and expr[length-1] == ')':
pass
else:
i = 0
while i < length:
c = expr[i]
if (c == '_' or ('a' <= c and c <= 'z') or
('A' <= c and c <= 'Z') or ('0' <= c and c <= '9')):
pass
else:
guard = True
break
i += 1
if guard:
return "(" + expr + ")"
else:
return expr
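# For example (inputs assumed): guard_expr('abc') returns 'abc' unchanged,
# while guard_expr('a.b') returns '(a.b)', so derived expressions built as
# "*" + guard_expr(expr) keep their intended grouping when re-evaluated.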
@staticmethod
def explore_expr(expr, value, is_child):
"""Main function to explore an expression value.
Arguments:
expr: The expression string that is being explored.
value: The gdb.Value value of the expression.
is_child: Boolean value to indicate if the expression is a child.
An expression is a child if it is derived from the main
expression entered by the user. For example, if the user
entered an expression which evaluates to a struct, then
when exploring the fields of the struct, is_child is set
to True internally.
Returns:
No return value.
"""
type_code = value.type.code
if type_code in Explorer.type_code_to_explorer_map:
explorer_class = Explorer.type_code_to_explorer_map[type_code]
while explorer_class.explore_expr(expr, value, is_child):
pass
else:
print ("Explorer for type '%s' not yet available.\n" %
str(value.type))
@staticmethod
def explore_type(name, datatype, is_child):
"""Main function to explore a data type.
Arguments:
name: The string representing the path to the data type being
explored.
datatype: The gdb.Type value of the data type being explored.
is_child: Boolean value to indicate if the name is a child.
A name is a child if it is derived from the main name
entered by the user. For example, if the user entered
the name of struct type, then when exploring the fields
of the struct, is_child is set to True internally.
Returns:
No return value.
"""
type_code = datatype.code
if type_code in Explorer.type_code_to_explorer_map:
explorer_class = Explorer.type_code_to_explorer_map[type_code]
while explorer_class.explore_type(name, datatype, is_child):
pass
else:
print ("Explorer for type '%s' not yet available.\n" %
str(datatype))
@staticmethod
def init_env():
"""Initializes the Explorer environment.
This function should be invoked before starting any exploration. If
invoked before an exploration, it need not be invoked for subsequent
explorations.
"""
Explorer.type_code_to_explorer_map = {
gdb.TYPE_CODE_CHAR : ScalarExplorer,
gdb.TYPE_CODE_INT : ScalarExplorer,
gdb.TYPE_CODE_BOOL : ScalarExplorer,
gdb.TYPE_CODE_FLT : ScalarExplorer,
gdb.TYPE_CODE_VOID : ScalarExplorer,
gdb.TYPE_CODE_ENUM : ScalarExplorer,
gdb.TYPE_CODE_STRUCT : CompoundExplorer,
gdb.TYPE_CODE_UNION : CompoundExplorer,
gdb.TYPE_CODE_PTR : PointerExplorer,
gdb.TYPE_CODE_REF : ReferenceExplorer,
gdb.TYPE_CODE_TYPEDEF : TypedefExplorer,
gdb.TYPE_CODE_ARRAY : ArrayExplorer
}
@staticmethod
def is_scalar_type(type):
"""Checks whether a type is a scalar type.
A type is a scalar type if its code is
gdb.TYPE_CODE_CHAR or
gdb.TYPE_CODE_INT or
gdb.TYPE_CODE_BOOL or
gdb.TYPE_CODE_FLT or
gdb.TYPE_CODE_VOID or
gdb.TYPE_CODE_ENUM.
Arguments:
type: The type to be checked.
Returns:
'True' if 'type' is a scalar type. 'False' otherwise.
"""
return type.code in Explorer._SCALAR_TYPE_LIST
@staticmethod
def return_to_parent_value():
"""A utility function which prints that the current exploration session
is returning to the parent value. Useful when exploring values.
"""
print ("\nReturning to parent value...\n")
@staticmethod
def return_to_parent_value_prompt():
"""A utility function which prompts the user to press the 'enter' key
so that the exploration session can shift back to the parent value.
Useful when exploring values.
"""
raw_input("\nPress enter to return to parent value: ")
@staticmethod
def return_to_enclosing_type():
"""A utility function which prints that the current exploration session
is returning to the enclosing type. Useful when exploring types.
"""
print ("\nReturning to enclosing type...\n")
@staticmethod
def return_to_enclosing_type_prompt():
"""A utility function which prompts the user to press the 'enter' key
so that the exploration session can shift back to the enclosing type.
Useful when exploring types.
"""
raw_input("\nPress enter to return to enclosing type: ")
class ScalarExplorer(object):
"""Internal class used to explore scalar values."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore scalar values.
See Explorer.explore_expr and Explorer.is_scalar_type for more
information.
"""
print ("'%s' is a scalar value of type '%s'." %
(expr, value.type))
print ("%s = %s" % (expr, str(value)))
if is_child:
Explorer.return_to_parent_value_prompt()
Explorer.return_to_parent_value()
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore scalar types.
See Explorer.explore_type and Explorer.is_scalar_type for more
information.
"""
if datatype.code == gdb.TYPE_CODE_ENUM:
if is_child:
print ("%s is of an enumerated type '%s'." %
(name, str(datatype)))
else:
print ("'%s' is an enumerated type." % name)
else:
if is_child:
print ("%s is of a scalar type '%s'." %
(name, str(datatype)))
else:
print ("'%s' is a scalar type." % name)
if is_child:
Explorer.return_to_enclosing_type_prompt()
Explorer.return_to_enclosing_type()
return False
class PointerExplorer(object):
"""Internal class used to explore pointer values."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore pointer values.
See Explorer.explore_expr for more information.
"""
print ("'%s' is a pointer to a value of type '%s'" %
(expr, str(value.type.target())))
option = raw_input("Continue exploring it as a pointer to a single "
"value [y/n]: ")
if option == "y":
deref_value = None
try:
deref_value = value.dereference()
str(deref_value)
except gdb.MemoryError:
print ("'%s' a pointer pointing to an invalid memory "
"location." % expr)
if is_child:
Explorer.return_to_parent_value_prompt()
return False
Explorer.explore_expr("*%s" % Explorer.guard_expr(expr),
deref_value, is_child)
return False
option = raw_input("Continue exploring it as a pointer to an "
"array [y/n]: ")
if option == "y":
while True:
index = 0
try:
index = int(raw_input("Enter the index of the element you "
"want to explore in '%s': " % expr))
except ValueError:
break
element_expr = "%s[%d]" % (Explorer.guard_expr(expr), index)
element = value[index]
try:
str(element)
except gdb.MemoryError:
print ("Cannot read value at index %d." % index)
continue
Explorer.explore_expr(element_expr, element, True)
return False
if is_child:
Explorer.return_to_parent_value()
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore pointer types.
See Explorer.explore_type for more information.
"""
target_type = datatype.target()
print ("\n%s is a pointer to a value of type '%s'." %
(name, str(target_type)))
Explorer.explore_type("the pointee type of %s" % name,
target_type,
is_child)
return False
class ReferenceExplorer(object):
"""Internal class used to explore reference (TYPE_CODE_REF) values."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore array values.
See Explorer.explore_expr for more information.
"""
referenced_value = value.referenced_value()
Explorer.explore_expr(expr, referenced_value, is_child)
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore pointer types.
See Explorer.explore_type for more information.
"""
target_type = datatype.target()
Explorer.explore_type(name, target_type, is_child)
return False
class ArrayExplorer(object):
"""Internal class used to explore arrays."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore array values.
See Explorer.explore_expr for more information.
"""
target_type = value.type.target()
print ("'%s' is an array of '%s'." % (expr, str(target_type)))
index = 0
try:
index = int(raw_input("Enter the index of the element you want to "
"explore in '%s': " % expr))
except ValueError:
if is_child:
Explorer.return_to_parent_value()
return False
element = None
try:
element = value[index]
str(element)
except gdb.MemoryError:
print ("Cannot read value at index %d." % index)
raw_input("Press enter to continue... ")
return True
Explorer.explore_expr("%s[%d]" % (Explorer.guard_expr(expr), index),
element, True)
return True
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore array types.
See Explorer.explore_type for more information.
"""
target_type = datatype.target()
print ("%s is an array of '%s'." % (name, str(target_type)))
Explorer.explore_type("the array element of %s" % name, target_type,
is_child)
return False
class CompoundExplorer(object):
"""Internal class used to explore struct, classes and unions."""
@staticmethod
def _print_fields(print_list):
"""Internal function which prints the fields of a struct/class/union.
"""
max_field_name_length = 0
for pair in print_list:
if max_field_name_length < len(pair[0]):
max_field_name_length = len(pair[0])
for pair in print_list:
print (" %*s = %s" % (max_field_name_length, pair[0], pair[1]))
@staticmethod
def _get_real_field_count(fields):
real_field_count = 0
for field in fields:
if not field.artificial:
real_field_count = real_field_count + 1
return real_field_count
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore structs/classes and union values.
See Explorer.explore_expr for more information.
"""
datatype = value.type
type_code = datatype.code
fields = datatype.fields()
if type_code == gdb.TYPE_CODE_STRUCT:
type_desc = "struct/class"
else:
type_desc = "union"
if CompoundExplorer._get_real_field_count(fields) == 0:
print ("The value of '%s' is a %s of type '%s' with no fields." %
(expr, type_desc, str(value.type)))
if is_child:
Explorer.return_to_parent_value_prompt()
return False
print ("The value of '%s' is a %s of type '%s' with the following "
"fields:\n" % (expr, type_desc, str(value.type)))
has_explorable_fields = False
choice_to_compound_field_map = { }
current_choice = 0
print_list = [ ]
for field in fields:
if field.artificial:
continue
field_full_name = Explorer.guard_expr(expr) + "." + field.name
if field.is_base_class:
field_value = value.cast(field.type)
else:
field_value = value[field.name]
literal_value = ""
if type_code == gdb.TYPE_CODE_UNION:
literal_value = ("<Enter %d to explore this field of type "
"'%s'>" % (current_choice, str(field.type)))
has_explorable_fields = True
else:
if Explorer.is_scalar_type(field.type):
literal_value = ("%s .. (Value of type '%s')" %
(str(field_value), str(field.type)))
else:
if field.is_base_class:
field_desc = "base class"
else:
field_desc = "field"
literal_value = ("<Enter %d to explore this %s of type "
"'%s'>" %
(current_choice, field_desc,
str(field.type)))
has_explorable_fields = True
choice_to_compound_field_map[str(current_choice)] = (
field_full_name, field_value)
current_choice = current_choice + 1
print_list.append((field.name, literal_value))
CompoundExplorer._print_fields(print_list)
print ("")
if has_explorable_fields:
choice = raw_input("Enter the field number of choice: ")
if choice in choice_to_compound_field_map:
Explorer.explore_expr(choice_to_compound_field_map[choice][0],
choice_to_compound_field_map[choice][1],
True)
return True
else:
if is_child:
Explorer.return_to_parent_value()
else:
if is_child:
Explorer.return_to_parent_value_prompt()
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore struct/class and union types.
See Explorer.explore_type for more information.
"""
type_code = datatype.code
type_desc = ""
if type_code == gdb.TYPE_CODE_STRUCT:
type_desc = "struct/class"
else:
type_desc = "union"
fields = datatype.fields()
if CompoundExplorer._get_real_field_count(fields) == 0:
if is_child:
print ("%s is a %s of type '%s' with no fields." %
(name, type_desc, str(datatype)))
Explorer.return_to_enclosing_type_prompt()
else:
print ("'%s' is a %s with no fields." % (name, type_desc))
return False
if is_child:
print ("%s is a %s of type '%s' "
"with the following fields:\n" %
(name, type_desc, str(datatype)))
else:
print ("'%s' is a %s with the following "
"fields:\n" %
(name, type_desc))
has_explorable_fields = False
current_choice = 0
choice_to_compound_field_map = { }
print_list = [ ]
for field in fields:
if field.artificial:
continue
if field.is_base_class:
field_desc = "base class"
else:
field_desc = "field"
rhs = ("<Enter %d to explore this %s of type '%s'>" %
(current_choice, field_desc, str(field.type)))
print_list.append((field.name, rhs))
choice_to_compound_field_map[str(current_choice)] = (
field.name, field.type, field_desc)
current_choice = current_choice + 1
CompoundExplorer._print_fields(print_list)
print ("")
if len(choice_to_compound_field_map) > 0:
choice = raw_input("Enter the field number of choice: ")
if choice in choice_to_compound_field_map:
if is_child:
new_name = ("%s '%s' of %s" %
(choice_to_compound_field_map[choice][2],
choice_to_compound_field_map[choice][0],
name))
else:
new_name = ("%s '%s' of '%s'" %
(choice_to_compound_field_map[choice][2],
choice_to_compound_field_map[choice][0],
name))
Explorer.explore_type(new_name,
choice_to_compound_field_map[choice][1], True)
return True
else:
if is_child:
Explorer.return_to_enclosing_type()
else:
if is_child:
Explorer.return_to_enclosing_type_prompt()
return False
class TypedefExplorer(object):
"""Internal class used to explore values whose type is a typedef."""
@staticmethod
def explore_expr(expr, value, is_child):
"""Function to explore typedef values.
See Explorer.explore_expr for more information.
"""
actual_type = value.type.strip_typedefs()
print ("The value of '%s' is of type '%s' "
"which is a typedef of type '%s'" %
(expr, str(value.type), str(actual_type)))
Explorer.explore_expr(expr, value.cast(actual_type), is_child)
return False
@staticmethod
def explore_type(name, datatype, is_child):
"""Function to explore typedef types.
See Explorer.explore_type for more information.
"""
actual_type = datatype.strip_typedefs()
if is_child:
print ("The type of %s is a typedef of type '%s'." %
(name, str(actual_type)))
else:
print ("The type '%s' is a typedef of type '%s'." %
(name, str(actual_type)))
Explorer.explore_type(name, actual_type, is_child)
return False
class ExploreUtils(object):
"""Internal class which provides utilities for the main command classes."""
@staticmethod
def check_args(name, arg_str):
"""Utility to check if adequate number of arguments are passed to an
explore command.
Arguments:
name: The name of the explore command.
arg_str: The argument string passed to the explore command.
Returns:
True if adequate arguments are passed, false otherwise.
Raises:
gdb.GdbError if adequate arguments are not passed.
"""
if len(arg_str) < 1:
raise gdb.GdbError("ERROR: '%s' requires an argument."
% name)
return False
else:
return True
@staticmethod
def get_type_from_str(type_str):
"""A utility function to deduce the gdb.Type value from a string
representing the type.
Arguments:
type_str: The type string from which the gdb.Type value should be
deduced.
Returns:
The deduced gdb.Type value if possible, None otherwise.
"""
try:
# Assume the current language to be C/C++ and make a try.
return gdb.parse_and_eval("(%s *)0" % type_str).type.target()
except RuntimeError:
# If assumption of current language to be C/C++ was wrong, then
# lookup the type using the API.
try:
return gdb.lookup_type(type_str)
except RuntimeError:
return None
@staticmethod
def get_value_from_str(value_str):
"""A utility function to deduce the gdb.Value value from a string
representing the value.
Arguments:
value_str: The value string from which the gdb.Value value should
be deduced.
Returns:
The deduced gdb.Value value if possible, None otherwise.
"""
try:
return gdb.parse_and_eval(value_str)
except RuntimeError:
return None
class ExploreCommand(gdb.Command):
"""Explore a value or a type valid in the current context.
Usage:
explore ARG
- ARG is either a valid expression or a type name.
- At any stage of exploration, hit the return key (instead of a
choice, if any) to return to the enclosing type or value.
"""
def __init__(self):
super(ExploreCommand, self).__init__(name = "explore",
command_class = gdb.COMMAND_DATA,
prefix = True)
def invoke(self, arg_str, from_tty):
if ExploreUtils.check_args("explore", arg_str) == False:
return
# Check if it is a value
value = ExploreUtils.get_value_from_str(arg_str)
if value is not None:
Explorer.explore_expr(arg_str, value, False)
return
# If it is not a value, check if it is a type
datatype = ExploreUtils.get_type_from_str(arg_str)
if datatype is not None:
Explorer.explore_type(arg_str, datatype, False)
return
# If it is neither a value nor a type, raise an error.
raise gdb.GdbError(
("'%s' neither evaluates to a value nor is a type "
"in the current context." %
arg_str))
class ExploreValueCommand(gdb.Command):
"""Explore value of an expression valid in the current context.
Usage:
explore value ARG
- ARG is a valid expression.
- At any stage of exploration, hit the return key (instead of a
choice, if any) to return to the enclosing value.
"""
def __init__(self):
super(ExploreValueCommand, self).__init__(
name = "explore value", command_class = gdb.COMMAND_DATA)
def invoke(self, arg_str, from_tty):
if ExploreUtils.check_args("explore value", arg_str) == False:
return
value = ExploreUtils.get_value_from_str(arg_str)
if value is None:
raise gdb.GdbError(
(" '%s' does not evaluate to a value in the current "
"context." %
arg_str))
return
Explorer.explore_expr(arg_str, value, False)
class ExploreTypeCommand(gdb.Command):
"""Explore a type or the type of an expression valid in the current
context.
Usage:
explore type ARG
- ARG is a valid expression or a type name.
- At any stage of exploration, hit the return key (instead of a
choice, if any) to return to the enclosing type.
"""
def __init__(self):
super(ExploreTypeCommand, self).__init__(
name = "explore type", command_class = gdb.COMMAND_DATA)
def invoke(self, arg_str, from_tty):
if ExploreUtils.check_args("explore type", arg_str) == False:
return
datatype = ExploreUtils.get_type_from_str(arg_str)
if datatype is not None:
Explorer.explore_type(arg_str, datatype, False)
return
value = ExploreUtils.get_value_from_str(arg_str)
if value is not None:
print ("'%s' is of type '%s'." % (arg_str, str(value.type)))
Explorer.explore_type(str(value.type), value.type, False)
return
raise gdb.GdbError(("'%s' is not a type or value in the current "
"context." % arg_str))
Explorer.init_env()
ExploreCommand()
ExploreValueCommand()
ExploreTypeCommand()
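# A short usage sketch from a GDB session; the commands are the ones defined
# above, while the expressions shown are illustrative.
#
#   (gdb) explore bookstore          # a value or a type named 'bookstore'
#   (gdb) explore value *head        # the value of an expression
#   (gdb) explore type struct node   # a type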
| gpl-2.0 |
broferek/ansible | test/units/modules/network/fortios/test_fortios_system_sdn_connector.py | 21 | 19557 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_sdn_connector
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_sdn_connector.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_system_sdn_connector_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_sdn_connector': {
'access_key': 'test_value_3',
'azure_region': 'global',
'client_id': 'test_value_5',
'client_secret': 'test_value_6',
'compartment_id': 'test_value_7',
'gcp_project': 'test_value_8',
'key_passwd': 'test_value_9',
'login_endpoint': 'test_value_10',
'name': 'default_name_11',
'oci_cert': 'test_value_12',
'oci_fingerprint': 'test_value_13',
'oci_region': 'phoenix',
'password': 'test_value_15',
'private_key': 'test_value_16',
'region': 'test_value_17',
'resource_group': 'test_value_18',
'resource_url': 'test_value_19',
'secret_key': 'test_value_20',
'server': '192.168.100.21',
'server_port': '22',
'service_account': 'test_value_23',
'status': 'disable',
'subscription_id': 'test_value_25',
'tenant_id': 'test_value_26',
'type': 'aci',
'update_interval': '28',
'use_metadata_iam': 'disable',
'user_id': 'test_value_30',
'username': 'test_value_31',
'vpc_id': 'test_value_32'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_sdn_connector.fortios_system(input_data, fos_instance)
expected_data = {
'access-key': 'test_value_3',
'azure-region': 'global',
'client-id': 'test_value_5',
'client-secret': 'test_value_6',
'compartment-id': 'test_value_7',
'gcp-project': 'test_value_8',
'key-passwd': 'test_value_9',
'login-endpoint': 'test_value_10',
'name': 'default_name_11',
'oci-cert': 'test_value_12',
'oci-fingerprint': 'test_value_13',
'oci-region': 'phoenix',
'password': 'test_value_15',
'private-key': 'test_value_16',
'region': 'test_value_17',
'resource-group': 'test_value_18',
'resource-url': 'test_value_19',
'secret-key': 'test_value_20',
'server': '192.168.100.21',
'server-port': '22',
'service-account': 'test_value_23',
'status': 'disable',
'subscription-id': 'test_value_25',
'tenant-id': 'test_value_26',
'type': 'aci',
'update-interval': '28',
'use-metadata-iam': 'disable',
'user-id': 'test_value_30',
'username': 'test_value_31',
'vpc-id': 'test_value_32'
}
set_method_mock.assert_called_with('system', 'sdn-connector', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_sdn_connector_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_sdn_connector': {
'access_key': 'test_value_3',
'azure_region': 'global',
'client_id': 'test_value_5',
'client_secret': 'test_value_6',
'compartment_id': 'test_value_7',
'gcp_project': 'test_value_8',
'key_passwd': 'test_value_9',
'login_endpoint': 'test_value_10',
'name': 'default_name_11',
'oci_cert': 'test_value_12',
'oci_fingerprint': 'test_value_13',
'oci_region': 'phoenix',
'password': 'test_value_15',
'private_key': 'test_value_16',
'region': 'test_value_17',
'resource_group': 'test_value_18',
'resource_url': 'test_value_19',
'secret_key': 'test_value_20',
'server': '192.168.100.21',
'server_port': '22',
'service_account': 'test_value_23',
'status': 'disable',
'subscription_id': 'test_value_25',
'tenant_id': 'test_value_26',
'type': 'aci',
'update_interval': '28',
'use_metadata_iam': 'disable',
'user_id': 'test_value_30',
'username': 'test_value_31',
'vpc_id': 'test_value_32'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_sdn_connector.fortios_system(input_data, fos_instance)
expected_data = {
'access-key': 'test_value_3',
'azure-region': 'global',
'client-id': 'test_value_5',
'client-secret': 'test_value_6',
'compartment-id': 'test_value_7',
'gcp-project': 'test_value_8',
'key-passwd': 'test_value_9',
'login-endpoint': 'test_value_10',
'name': 'default_name_11',
'oci-cert': 'test_value_12',
'oci-fingerprint': 'test_value_13',
'oci-region': 'phoenix',
'password': 'test_value_15',
'private-key': 'test_value_16',
'region': 'test_value_17',
'resource-group': 'test_value_18',
'resource-url': 'test_value_19',
'secret-key': 'test_value_20',
'server': '192.168.100.21',
'server-port': '22',
'service-account': 'test_value_23',
'status': 'disable',
'subscription-id': 'test_value_25',
'tenant-id': 'test_value_26',
'type': 'aci',
'update-interval': '28',
'use-metadata-iam': 'disable',
'user-id': 'test_value_30',
'username': 'test_value_31',
'vpc-id': 'test_value_32'
}
set_method_mock.assert_called_with('system', 'sdn-connector', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_sdn_connector_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_sdn_connector': {
'access_key': 'test_value_3',
'azure_region': 'global',
'client_id': 'test_value_5',
'client_secret': 'test_value_6',
'compartment_id': 'test_value_7',
'gcp_project': 'test_value_8',
'key_passwd': 'test_value_9',
'login_endpoint': 'test_value_10',
'name': 'default_name_11',
'oci_cert': 'test_value_12',
'oci_fingerprint': 'test_value_13',
'oci_region': 'phoenix',
'password': 'test_value_15',
'private_key': 'test_value_16',
'region': 'test_value_17',
'resource_group': 'test_value_18',
'resource_url': 'test_value_19',
'secret_key': 'test_value_20',
'server': '192.168.100.21',
'server_port': '22',
'service_account': 'test_value_23',
'status': 'disable',
'subscription_id': 'test_value_25',
'tenant_id': 'test_value_26',
'type': 'aci',
'update_interval': '28',
'use_metadata_iam': 'disable',
'user_id': 'test_value_30',
'username': 'test_value_31',
'vpc_id': 'test_value_32'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_sdn_connector.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'sdn-connector', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_sdn_connector_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_sdn_connector': {
'access_key': 'test_value_3',
'azure_region': 'global',
'client_id': 'test_value_5',
'client_secret': 'test_value_6',
'compartment_id': 'test_value_7',
'gcp_project': 'test_value_8',
'key_passwd': 'test_value_9',
'login_endpoint': 'test_value_10',
'name': 'default_name_11',
'oci_cert': 'test_value_12',
'oci_fingerprint': 'test_value_13',
'oci_region': 'phoenix',
'password': 'test_value_15',
'private_key': 'test_value_16',
'region': 'test_value_17',
'resource_group': 'test_value_18',
'resource_url': 'test_value_19',
'secret_key': 'test_value_20',
'server': '192.168.100.21',
'server_port': '22',
'service_account': 'test_value_23',
'status': 'disable',
'subscription_id': 'test_value_25',
'tenant_id': 'test_value_26',
'type': 'aci',
'update_interval': '28',
'use_metadata_iam': 'disable',
'user_id': 'test_value_30',
'username': 'test_value_31',
'vpc_id': 'test_value_32'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_sdn_connector.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'sdn-connector', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_sdn_connector_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_sdn_connector': {
'access_key': 'test_value_3',
'azure_region': 'global',
'client_id': 'test_value_5',
'client_secret': 'test_value_6',
'compartment_id': 'test_value_7',
'gcp_project': 'test_value_8',
'key_passwd': 'test_value_9',
'login_endpoint': 'test_value_10',
'name': 'default_name_11',
'oci_cert': 'test_value_12',
'oci_fingerprint': 'test_value_13',
'oci_region': 'phoenix',
'password': 'test_value_15',
'private_key': 'test_value_16',
'region': 'test_value_17',
'resource_group': 'test_value_18',
'resource_url': 'test_value_19',
'secret_key': 'test_value_20',
'server': '192.168.100.21',
'server_port': '22',
'service_account': 'test_value_23',
'status': 'disable',
'subscription_id': 'test_value_25',
'tenant_id': 'test_value_26',
'type': 'aci',
'update_interval': '28',
'use_metadata_iam': 'disable',
'user_id': 'test_value_30',
'username': 'test_value_31',
'vpc_id': 'test_value_32'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_sdn_connector.fortios_system(input_data, fos_instance)
expected_data = {
'access-key': 'test_value_3',
'azure-region': 'global',
'client-id': 'test_value_5',
'client-secret': 'test_value_6',
'compartment-id': 'test_value_7',
'gcp-project': 'test_value_8',
'key-passwd': 'test_value_9',
'login-endpoint': 'test_value_10',
'name': 'default_name_11',
'oci-cert': 'test_value_12',
'oci-fingerprint': 'test_value_13',
'oci-region': 'phoenix',
'password': 'test_value_15',
'private-key': 'test_value_16',
'region': 'test_value_17',
'resource-group': 'test_value_18',
'resource-url': 'test_value_19',
'secret-key': 'test_value_20',
'server': '192.168.100.21',
'server-port': '22',
'service-account': 'test_value_23',
'status': 'disable',
'subscription-id': 'test_value_25',
'tenant-id': 'test_value_26',
'type': 'aci',
'update-interval': '28',
'use-metadata-iam': 'disable',
'user-id': 'test_value_30',
'username': 'test_value_31',
'vpc-id': 'test_value_32'
}
set_method_mock.assert_called_with('system', 'sdn-connector', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_system_sdn_connector_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_sdn_connector': {
'random_attribute_not_valid': 'tag',
'access_key': 'test_value_3',
'azure_region': 'global',
'client_id': 'test_value_5',
'client_secret': 'test_value_6',
'compartment_id': 'test_value_7',
'gcp_project': 'test_value_8',
'key_passwd': 'test_value_9',
'login_endpoint': 'test_value_10',
'name': 'default_name_11',
'oci_cert': 'test_value_12',
'oci_fingerprint': 'test_value_13',
'oci_region': 'phoenix',
'password': 'test_value_15',
'private_key': 'test_value_16',
'region': 'test_value_17',
'resource_group': 'test_value_18',
'resource_url': 'test_value_19',
'secret_key': 'test_value_20',
'server': '192.168.100.21',
'server_port': '22',
'service_account': 'test_value_23',
'status': 'disable',
'subscription_id': 'test_value_25',
'tenant_id': 'test_value_26',
'type': 'aci',
'update_interval': '28',
'use_metadata_iam': 'disable',
'user_id': 'test_value_30',
'username': 'test_value_31',
'vpc_id': 'test_value_32'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_sdn_connector.fortios_system(input_data, fos_instance)
expected_data = {
'access-key': 'test_value_3',
'azure-region': 'global',
'client-id': 'test_value_5',
'client-secret': 'test_value_6',
'compartment-id': 'test_value_7',
'gcp-project': 'test_value_8',
'key-passwd': 'test_value_9',
'login-endpoint': 'test_value_10',
'name': 'default_name_11',
'oci-cert': 'test_value_12',
'oci-fingerprint': 'test_value_13',
'oci-region': 'phoenix',
'password': 'test_value_15',
'private-key': 'test_value_16',
'region': 'test_value_17',
'resource-group': 'test_value_18',
'resource-url': 'test_value_19',
'secret-key': 'test_value_20',
'server': '192.168.100.21',
'server-port': '22',
'service-account': 'test_value_23',
'status': 'disable',
'subscription-id': 'test_value_25',
'tenant-id': 'test_value_26',
'type': 'aci',
'update-interval': '28',
'use-metadata-iam': 'disable',
'user-id': 'test_value_30',
'username': 'test_value_31',
'vpc-id': 'test_value_32'
}
set_method_mock.assert_called_with('system', 'sdn-connector', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
ztp-at/RKSV | librksv/test/verification_proxy.py | 1 | 2681 | ###########################################################################
# Copyright 2017 ZT Prentner IT GmbH (www.ztp.at)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
from ..gettext_helper import _
class RKSVVerificationProxyI(object):
def verify(self, fd, keyStore, aesKey, inState, registerIdx, chunksize):
raise NotImplementedError("Please implement this yourself.")
from sys import version_info
if version_info[0] < 3:
import __builtin__
else:
import builtins as __builtin__
from .. import depparser
from .. import key_store
from .. import receipt
from .. import verification_state
from .. import verify
from .. import verify_receipt
class LibRKSVVerificationProxy(RKSVVerificationProxyI):
def __init__(self, pool, nprocs):
self.pool = pool
self.nprocs = nprocs
def verify(self, fd, keyStore, aesKey, inState, registerIdx, chunksize):
# Save the _() function.
trvec = (
__builtin__._ ,
depparser._,
key_store._,
receipt._,
verification_state._,
verify._,
verify_receipt._,
)
# Temporarily disable translations to make sure error
# messages match.
(
__builtin__._ ,
depparser._,
key_store._,
receipt._,
verification_state._,
verify._,
verify_receipt._,
) = [lambda x: x] * len(trvec)
try:
parser = depparser.IncrementalDEPParser.fromFd(fd, True)
outState = verify.verifyParsedDEP(parser, keyStore, aesKey, inState,
registerIdx, self.pool, self.nprocs, chunksize)
finally:
(
__builtin__._ ,
depparser._,
key_store._,
receipt._,
verification_state._,
verify._,
verify_receipt._,
) = trvec
return outState
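# Usage sketch. The surrounding objects (key store, AES key, verification
# state, register index) come from the rest of librksv; the names below are
# hypothetical placeholders, not part of this module's API.
#
#     from multiprocessing import Pool
#     pool = Pool(4)
#     proxy = LibRKSVVerificationProxy(pool, nprocs=4)
#     with open('dep-export.json', 'rb') as fd:
#         outState = proxy.verify(fd, keyStore, aesKey, inState,
#                                 registerIdx, chunksize=100000)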
| agpl-3.0 |
zmwangx/you-get | src/you_get/extractors/qq_egame.py | 2 | 1652 | import re
import json
from ..common import get_content
from ..extractors import VideoExtractor
from ..util import log
from ..util.strings import unescape_html
__all__ = ['qq_egame_download']
class QQEgame(VideoExtractor):
stream_types = [
{'id': 'original', 'video_profile': '0', 'container': 'flv'},
{'id': '900', 'video_profile': '900kb/s', 'container': 'flv'},
{'id': '550', 'video_profile': '550kb/s', 'container': 'flv'}
]
name = 'QQEgame'
def prepare(self, **kwargs):
page = get_content(self.url)
server_data = re.search(r'serverData\s*=\s*({.+?});', page)
if server_data is None:
log.wtf('cannot find server_data')
json_data = json.loads(server_data.group(1))
live_info = json_data['liveInfo']['data']
self.title = '{}_{}'.format(live_info['profileInfo']['nickName'], live_info['videoInfo']['title'])
for exsited_stream in live_info['videoInfo']['streamInfos']:
for s in self.__class__.stream_types:
if re.search(r'(\d+)', s['video_profile']).group(1) == exsited_stream['bitrate']:
current_stream_id = s['id']
stream_info = dict(src=[unescape_html(exsited_stream['playUrl'])])
stream_info['video_profile'] = exsited_stream['desc']
stream_info['container'] = s['container']
stream_info['size'] = float('inf')
self.streams[current_stream_id] = stream_info
def qq_egame_download(url, **kwargs):
QQEgame().download_by_url(url, **kwargs)
# url dispatching has been done in qq.py
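# Example (sketch): the extractor is usually reached through you-get's URL
# dispatch in qq.py, but it can also be called directly; the room URL below
# is a placeholder.
#
#     qq_egame_download('https://egame.qq.com/<room_id>', output_dir='.', merge=True)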
| mit |
joefutrelle/domdb | utils.py | 1 | 1859 | def rpad(s,l,pad_string=' '):
return s + (pad_string * (l - len(s)))
def asciitable(dicts,disp_cols=None,none_msg=None,border=True):
"""produce an ASCII formatted columnar table from the dicts"""
dicts = list(dicts)
if not dicts:
if none_msg is not None:
yield none_msg
return
if disp_cols is not None:
cols = disp_cols
else:
# set of all keys in dicts
cols = sorted(list(set(reduce(lambda x,y: x+y, [d.keys() for d in dicts]))))
# compute col widths. initially wide enough for the column label
widths = dict([(col,len(col)) for col in cols])
# now create rows, and in doing so compute max width of each column
for row in list(dicts):
for col in cols:
try:
width = len(str(row[col]))
except KeyError:
width = 0
if width > widths[col]:
widths[col] = width
def bord(line,border_char='|',pad_char=' '):
if border:
return border_char + pad_char + line + pad_char + border_char
else:
return line
# now print rows
spacer = bord('-+-'.join(['-' * widths[col] for col in cols]),'+','-')
if border:
yield spacer
yield bord(' | '.join([rpad(col,widths[col]) for col in cols]),'|')
yield spacer
for row in dicts:
yield bord(' | '.join([rpad(str(row[col]),widths[col]) for col in cols]),'|')
if border:
yield spacer
def resultproxy2asciitable(r,empty_message='No rows'):
"""yields an asciitable representation of an SQLAlchemy ResultProxy"""
cols = []
row_proxies = r.fetchall()
rows = []
for r in row_proxies:
if not cols:
cols = r.keys()
rows.append(dict(r.items()))
for line in asciitable(rows,cols,empty_message):
print line
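# Self-contained example of the helpers above (Python 2, matching this
# module): renders a bordered two-column table.
def _asciitable_demo():
    rows = [{'name': 'alpha', 'count': 3}, {'name': 'beta', 'count': 12}]
    for line in asciitable(rows, disp_cols=['name', 'count']):
        print line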
| mit |
drnextgis/QGIS | python/plugins/processing/algs/grass7/Grass7Utils.py | 1 | 16129 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GrassUtils.py
---------------------
Date : February 2015
Copyright : (C) 2014-2015 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
from builtins import object
__author__ = 'Victor Olaya'
__date__ = 'February 2015'
__copyright__ = '(C) 2014-2015, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import stat
import shutil
import subprocess
import os
from qgis.core import QgsApplication
from qgis.PyQt.QtCore import QCoreApplication
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import userFolder, isWindows, isMac, tempFolder, mkdir
from processing.tests.TestData import points
class Grass7Utils(object):
GRASS_REGION_XMIN = 'GRASS7_REGION_XMIN'
GRASS_REGION_YMIN = 'GRASS7_REGION_YMIN'
GRASS_REGION_XMAX = 'GRASS7_REGION_XMAX'
GRASS_REGION_YMAX = 'GRASS7_REGION_YMAX'
GRASS_REGION_CELLSIZE = 'GRASS7_REGION_CELLSIZE'
GRASS_FOLDER = 'GRASS7_FOLDER'
GRASS_LOG_COMMANDS = 'GRASS7_LOG_COMMANDS'
GRASS_LOG_CONSOLE = 'GRASS7_LOG_CONSOLE'
sessionRunning = False
sessionLayers = {}
projectionSet = False
isGrass7Installed = False
@staticmethod
def grassBatchJobFilename():
'''This is used in Linux. This is the batch job that we assign to
GRASS_BATCH_JOB and then call GRASS and let it do the work
'''
filename = 'grass7_batch_job.sh'
batchfile = os.path.join(userFolder(), filename)
return batchfile
@staticmethod
def grassScriptFilename():
'''This is used in windows. We create a script that initializes
GRASS and then uses grass commands
'''
filename = 'grass7_script.bat'
filename = os.path.join(userFolder(), filename)
return filename
@staticmethod
def getGrassVersion():
# FIXME: I do not know if this should be removed or let the user enter it
# or something like that... This is just a temporary thing
return '7.0.0'
@staticmethod
def grassPath():
if not isWindows() and not isMac():
return ''
folder = ProcessingConfig.getSetting(Grass7Utils.GRASS_FOLDER) or ''
if not os.path.exists(folder):
folder = None
if folder is None:
if isWindows():
if "OSGEO4W_ROOT" in os.environ:
testfolder = os.path.join(str(os.environ['OSGEO4W_ROOT']), "apps")
else:
testfolder = str(QgsApplication.prefixPath())
testfolder = os.path.join(testfolder, 'grass')
if os.path.isdir(testfolder):
for subfolder in os.listdir(testfolder):
if subfolder.startswith('grass-7'):
folder = os.path.join(testfolder, subfolder)
break
else:
folder = os.path.join(str(QgsApplication.prefixPath()), 'grass7')
if not os.path.isdir(folder):
folder = '/Applications/GRASS-7.0.app/Contents/MacOS'
return folder or ''
@staticmethod
def grassDescriptionPath():
return os.path.join(os.path.dirname(__file__), 'description')
@staticmethod
def createGrass7Script(commands):
folder = Grass7Utils.grassPath()
script = Grass7Utils.grassScriptFilename()
gisrc = os.path.join(userFolder(), 'processing.gisrc7') # FIXME: use temporary file
# Temporary gisrc file
with open(gisrc, 'w') as output:
location = 'temp_location'
gisdbase = Grass7Utils.grassDataFolder()
output.write('GISDBASE: ' + gisdbase + '\n')
output.write('LOCATION_NAME: ' + location + '\n')
output.write('MAPSET: PERMANENT \n')
output.write('GRASS_GUI: text\n')
with open(script, 'w') as output:
output.write('set HOME=' + os.path.expanduser('~') + '\n')
output.write('set GISRC=' + gisrc + '\n')
output.write('set WINGISBASE=' + folder + '\n')
output.write('set GISBASE=' + folder + '\n')
output.write('set GRASS_PROJSHARE=' + os.path.join(folder, 'share', 'proj') + '\n')
output.write('set GRASS_MESSAGE_FORMAT=plain\n')
# Replacement code for etc/Init.bat
output.write('if "%GRASS_ADDON_PATH%"=="" set PATH=%WINGISBASE%\\bin;%WINGISBASE%\\lib;%PATH%\n')
output.write('if not "%GRASS_ADDON_PATH%"=="" set PATH=%WINGISBASE%\\bin;%WINGISBASE%\\lib;%GRASS_ADDON_PATH%;%PATH%\n')
output.write('\n')
output.write('set GRASS_VERSION=' + Grass7Utils.getGrassVersion() + '\n')
output.write('if not "%LANG%"=="" goto langset\n')
output.write('FOR /F "usebackq delims==" %%i IN (`"%WINGISBASE%\\etc\\winlocale"`) DO @set LANG=%%i\n')
output.write(':langset\n')
output.write('\n')
output.write('set PATHEXT=%PATHEXT%;.PY\n')
            output.write('set PYTHONPATH=%PYTHONPATH%;%WINGISBASE%\\etc\\python;%WINGISBASE%\\etc\\wxpython\n')
output.write('\n')
output.write('g.gisenv.exe set="MAPSET=PERMANENT"\n')
output.write('g.gisenv.exe set="LOCATION=' + location + '"\n')
output.write('g.gisenv.exe set="LOCATION_NAME=' + location + '"\n')
output.write('g.gisenv.exe set="GISDBASE=' + gisdbase + '"\n')
output.write('g.gisenv.exe set="GRASS_GUI=text"\n')
for command in commands:
Grass7Utils.writeCommand(output, command)
output.write('\n')
output.write('exit\n')
@staticmethod
def createGrass7BatchJobFileFromGrass7Commands(commands):
with open(Grass7Utils.grassBatchJobFilename(), 'w') as fout:
for command in commands:
Grass7Utils.writeCommand(fout, command)
fout.write('exit')
@staticmethod
def grassMapsetFolder():
folder = os.path.join(Grass7Utils.grassDataFolder(), 'temp_location')
mkdir(folder)
return folder
@staticmethod
def grassDataFolder():
tempfolder = os.path.join(tempFolder(), 'grassdata')
mkdir(tempfolder)
return tempfolder
@staticmethod
def createTempMapset():
'''Creates a temporary location and mapset(s) for GRASS data
processing. A minimal set of folders and files is created in the
system's default temporary directory. The settings files are
written with sane defaults, so GRASS can do its work. The mapset
projection will be set later, based on the projection of the first
input image or vector
'''
folder = Grass7Utils.grassMapsetFolder()
mkdir(os.path.join(folder, 'PERMANENT'))
mkdir(os.path.join(folder, 'PERMANENT', '.tmp'))
Grass7Utils.writeGrass7Window(os.path.join(folder, 'PERMANENT', 'DEFAULT_WIND'))
with open(os.path.join(folder, 'PERMANENT', 'MYNAME'), 'w') as outfile:
outfile.write(
'QGIS GRASS GIS 7 interface: temporary data processing location.\n')
Grass7Utils.writeGrass7Window(os.path.join(folder, 'PERMANENT', 'WIND'))
mkdir(os.path.join(folder, 'PERMANENT', 'sqlite'))
with open(os.path.join(folder, 'PERMANENT', 'VAR'), 'w') as outfile:
outfile.write('DB_DRIVER: sqlite\n')
outfile.write('DB_DATABASE: $GISDBASE/$LOCATION_NAME/$MAPSET/sqlite/sqlite.db\n')
@staticmethod
def writeGrass7Window(filename):
with open(filename, 'w') as out:
out.write('proj: 0\n')
out.write('zone: 0\n')
out.write('north: 1\n')
out.write('south: 0\n')
out.write('east: 1\n')
out.write('west: 0\n')
out.write('cols: 1\n')
out.write('rows: 1\n')
out.write('e-w resol: 1\n')
out.write('n-s resol: 1\n')
out.write('top: 1\n')
out.write('bottom: 0\n')
out.write('cols3: 1\n')
out.write('rows3: 1\n')
out.write('depths: 1\n')
out.write('e-w resol3: 1\n')
out.write('n-s resol3: 1\n')
out.write('t-b resol: 1\n')
@staticmethod
def prepareGrass7Execution(commands):
env = os.environ.copy()
if isWindows():
Grass7Utils.createGrass7Script(commands)
command = ['cmd.exe', '/C ', Grass7Utils.grassScriptFilename()]
else:
gisrc = os.path.join(userFolder(), 'processing.gisrc7')
env['GISRC'] = gisrc
env['GRASS_MESSAGE_FORMAT'] = 'plain'
env['GRASS_BATCH_JOB'] = Grass7Utils.grassBatchJobFilename()
if 'GISBASE' in env:
del env['GISBASE']
Grass7Utils.createGrass7BatchJobFileFromGrass7Commands(commands)
os.chmod(Grass7Utils.grassBatchJobFilename(), stat.S_IEXEC
| stat.S_IREAD | stat.S_IWRITE)
if isMac() and os.path.exists(os.path.join(Grass7Utils.grassPath(), 'grass.sh')):
command = os.path.join(Grass7Utils.grassPath(), 'grass.sh') + ' ' \
+ os.path.join(Grass7Utils.grassMapsetFolder(), 'PERMANENT')
else:
command = 'grass70 ' + os.path.join(Grass7Utils.grassMapsetFolder(), 'PERMANENT')
return command, env
@staticmethod
def executeGrass7(commands, progress, outputCommands=None):
loglines = []
loglines.append(Grass7Utils.tr('GRASS GIS 7 execution console output'))
grassOutDone = False
command, grassenv = Grass7Utils.prepareGrass7Execution(commands)
with subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
universal_newlines=True,
env=grassenv
) as proc:
for line in iter(proc.stdout.readline, ''):
if 'GRASS_INFO_PERCENT' in line:
try:
progress.setPercentage(int(line[len('GRASS_INFO_PERCENT') + 2:]))
except:
pass
else:
if 'r.out' in line or 'v.out' in line:
grassOutDone = True
loglines.append(line)
progress.setConsoleInfo(line)
# Some GRASS scripts, like r.mapcalculator or r.fillnulls, call
# other GRASS scripts during execution. This may override any
# commands that are still to be executed by the subprocess, which
# are usually the output ones. If that is the case runs the output
# commands again.
if not grassOutDone and outputCommands:
command, grassenv = Grass7Utils.prepareGrass7Execution(outputCommands)
with subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
universal_newlines=True,
env=grassenv
) as proc:
for line in iter(proc.stdout.readline, ''):
if 'GRASS_INFO_PERCENT' in line:
try:
progress.setPercentage(int(
line[len('GRASS_INFO_PERCENT') + 2:]))
except:
pass
else:
loglines.append(line)
progress.setConsoleInfo(line)
if ProcessingConfig.getSetting(Grass7Utils.GRASS_LOG_CONSOLE):
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
# GRASS session is used to hold the layers already exported or
# produced in GRASS between multiple calls to GRASS algorithms.
# This way they don't have to be loaded multiple times and
# following algorithms can use the results of the previous ones.
# Starting a session just involves creating the temp mapset
# structure
@staticmethod
def startGrass7Session():
if not Grass7Utils.sessionRunning:
Grass7Utils.createTempMapset()
Grass7Utils.sessionRunning = True
# End session by removing the temporary GRASS mapset and all
# the layers.
@staticmethod
def endGrass7Session():
shutil.rmtree(Grass7Utils.grassMapsetFolder(), True)
Grass7Utils.sessionRunning = False
Grass7Utils.sessionLayers = {}
Grass7Utils.projectionSet = False
@staticmethod
def getSessionLayers():
return Grass7Utils.sessionLayers
@staticmethod
def addSessionLayers(exportedLayers):
Grass7Utils.sessionLayers = dict(
list(Grass7Utils.sessionLayers.items())
+ list(exportedLayers.items()))
@staticmethod
def checkGrass7IsInstalled(ignorePreviousState=False):
if isWindows():
path = Grass7Utils.grassPath()
if path == '':
return Grass7Utils.tr(
'GRASS GIS 7 folder is not configured. Please configure '
'it before running GRASS GIS 7 algorithms.')
cmdpath = os.path.join(path, 'bin', 'r.out.gdal.exe')
if not os.path.exists(cmdpath):
return Grass7Utils.tr(
'The specified GRASS 7 folder "{}" does not contain '
'a valid set of GRASS 7 modules.\nPlease, go to the '
'Processing settings dialog, and check that the '
'GRASS 7\nfolder is correctly configured'.format(os.path.join(path, 'bin')))
if not ignorePreviousState:
if Grass7Utils.isGrass7Installed:
return
try:
from processing import runalg
result = runalg(
'grass7:v.voronoi',
points(),
False,
False,
None,
-1,
0.0001,
0,
None,
)
if not os.path.exists(result['output']):
return Grass7Utils.tr(
'It seems that GRASS GIS 7 is not correctly installed and '
'configured in your system.\nPlease install it before '
'running GRASS GIS 7 algorithms.')
except:
return Grass7Utils.tr(
'Error while checking GRASS GIS 7 installation. GRASS GIS 7 '
'might not be correctly configured.\n')
Grass7Utils.isGrass7Installed = True
@staticmethod
def tr(string, context=''):
if context == '':
context = 'Grass7Utils'
return QCoreApplication.translate(context, string)
@staticmethod
def writeCommand(output, command):
try:
# Python 2
output.write(command.encode('utf8') + '\n')
except TypeError:
# Python 3
output.write(command + '\n')
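# Usage sketch (the command strings are hypothetical examples; Processing's
# GRASS algorithm wrappers normally assemble them):
#
#     Grass7Utils.startGrass7Session()
#     commands = ['g.region -p', 'r.info map=elevation']
#     Grass7Utils.executeGrass7(commands, progress)  # 'progress' is the Processing progress object
#     Grass7Utils.endGrass7Session()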
| gpl-2.0 |
leki75/ansible | lib/ansible/modules/cloud/rackspace/rax_cbs_attachments.py | 14 | 6902 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module; it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_cbs_attachments
short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
description:
- Manipulate Rackspace Cloud Block Storage Volume Attachments
version_added: 1.6
options:
device:
description:
- The device path to attach the volume to, e.g. /dev/xvde.
- Before 2.4 this was a required field. Now it can be left to null to auto assign the device name.
default: null
required: false
volume:
description:
- Name or id of the volume to attach/detach
default: null
required: true
server:
description:
- Name or id of the server to attach/detach
default: null
required: true
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
wait:
description:
- wait for the volume to be in 'in-use'/'available' state before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Attach a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume attach request
local_action:
module: rax_cbs_attachments
credentials: ~/.raxpub
volume: my-volume
server: my-server
device: /dev/xvdd
region: DFW
wait: yes
state: present
register: my_volume
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage_attachments(module, state, volume, server, device,
wait, wait_timeout):
cbs = pyrax.cloud_blockstorage
cs = pyrax.cloudservers
if cbs is None or cs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
changed = False
instance = {}
volume = rax_find_volume(module, pyrax, volume)
if not volume:
module.fail_json(msg='No matching storage volumes were found')
if state == 'present':
server = rax_find_server(module, pyrax, server)
if (volume.attachments and
volume.attachments[0]['server_id'] == server.id):
changed = False
elif volume.attachments:
module.fail_json(msg='Volume is attached to another server')
else:
try:
volume.attach_to_instance(server, mountpoint=device)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
volume.get()
for key, value in vars(volume).items():
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
elif wait:
attempts = wait_timeout / 5
pyrax.utils.wait_until(volume, 'status', 'in-use',
interval=5, attempts=attempts)
volume.get()
result['volume'] = rax_to_dict(volume)
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
server = rax_find_server(module, pyrax, server)
if (volume.attachments and
volume.attachments[0]['server_id'] == server.id):
try:
volume.detach()
if wait:
pyrax.utils.wait_until(volume, 'status', 'available',
interval=3, attempts=0,
verbose=False)
changed = True
except Exception as e:
module.fail_json(msg='%s' % e.message)
volume.get()
changed = True
elif volume.attachments:
module.fail_json(msg='Volume is attached to another server')
result = dict(changed=changed, volume=rax_to_dict(volume))
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
module.exit_json(changed=changed, volume=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
device=dict(required=False),
volume=dict(required=True),
server=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
device = module.params.get('device')
volume = module.params.get('volume')
server = module.params.get('server')
state = module.params.get('state')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
cloud_block_storage_attachments(module, state, volume, server, device,
wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
if __name__ == '__main__':
main()
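# A detach request mirrors the EXAMPLES block above, differing only in
# 'state: absent' (sketch):
#
#   - name: Storage volume detach request
#     local_action:
#       module: rax_cbs_attachments
#       credentials: ~/.raxpub
#       volume: my-volume
#       server: my-server
#       region: DFW
#       wait: yes
#       state: absent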
| gpl-3.0 |
philotas/enigma2 | lib/python/Components/Converter/ServicePosition.py | 13 | 21710 | from Converter import Converter
from Poll import Poll
from enigma import iPlayableService
from Components.Element import cached, ElementError
from Components.config import config
class ServicePosition(Poll, Converter, object):
TYPE_LENGTH = 0
TYPE_POSITION = 1
TYPE_REMAINING = 2
TYPE_GAUGE = 3
TYPE_SUMMARY = 4
TYPE_VFD_LENGTH = 5
TYPE_VFD_POSITION = 6
TYPE_VFD_REMAINING = 7
TYPE_VFD_GAUGE = 8
TYPE_VFD_SUMMARY = 9
def __init__(self, type):
Poll.__init__(self)
Converter.__init__(self, type)
args = type.split(',')
type = args.pop(0)
self.negate = 'Negate' in args
self.detailed = 'Detailed' in args
self.showHours = 'ShowHours' in args
self.showNoSeconds = 'ShowNoSeconds' in args
self.OnlyMinute = 'OnlyMinute' in args
if type == "Length":
self.type = self.TYPE_LENGTH
elif type == "Position":
self.type = self.TYPE_POSITION
elif type == "Remaining":
self.type = self.TYPE_REMAINING
elif type == "Gauge":
self.type = self.TYPE_GAUGE
elif type == "Summary":
self.type = self.TYPE_SUMMARY
elif type == "VFDLength":
self.type = self.TYPE_VFD_LENGTH
elif type == "VFDPosition":
self.type = self.TYPE_VFD_POSITION
elif type == "VFDRemaining":
self.type = self.TYPE_VFD_REMAINING
elif type == "VFDGauge":
self.type = self.TYPE_VFD_GAUGE
elif type == "VFDSummary":
self.type = self.TYPE_VFD_SUMMARY
else:
raise ElementError("type must be {Length|Position|Remaining|Gauge|Summary} with optional arguments {Negate|Detailed|ShowHours|ShowNoSeconds} for ServicePosition converter")
if self.detailed:
self.poll_interval = 100
elif self.type == self.TYPE_LENGTH or self.type == self.TYPE_VFD_LENGTH:
self.poll_interval = 2000
else:
self.poll_interval = 500
self.poll_enabled = True
def getSeek(self):
s = self.source.service
return s and s.seek()
@cached
def getPosition(self):
seek = self.getSeek()
if seek is None:
return None
pos = seek.getPlayPosition()
if pos[0]:
return 0
return pos[1]
@cached
def getLength(self):
seek = self.getSeek()
if seek is None:
return None
length = seek.getLength()
if length[0]:
return 0
return length[1]
@cached
def getCutlist(self):
service = self.source.service
cue = service and service.cueSheet()
return cue and cue.getCutList()
@cached
def getText(self):
seek = self.getSeek()
if seek is None:
return ""
		if self.type == self.TYPE_SUMMARY or self.type == self.TYPE_VFD_SUMMARY:
s = self.position / 90000
e = (self.length / 90000) - s
return "%02d:%02d +%2dm" % (s/60, s%60, e/60)
l = self.length
p = self.position
r = self.length - self.position # Remaining
if l < 0:
return ""
if not self.detailed:
l /= 90000
p /= 90000
r /= 90000
		if self.negate:
			l = -l
			p = -p
			r = -r
if l >= 0:
sign_l = ""
else:
l = -l
sign_l = "-"
if p >= 0:
sign_p = ""
else:
p = -p
sign_p = "-"
if r >= 0:
sign_r = ""
else:
r = -r
sign_r = "-"
if self.type < 5:
if config.usage.elapsed_time_positive_osd.value:
sign_p = "+"
sign_r = "-"
sign_l = ""
else:
sign_p = "-"
sign_r = "+"
sign_l = ""
if config.usage.swap_media_time_display_on_osd.value == "1": # Mins
if self.type == self.TYPE_LENGTH:
return ngettext("%d Min", "%d Mins", (l/60)) % (l/60)
elif self.type == self.TYPE_POSITION:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
return sign_p + "%d " % (p/60) + sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
return sign_r + "%d " % (r/60) + sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
else:
return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
elif self.type == self.TYPE_REMAINING:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
return ""
else:
return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
elif config.usage.swap_media_time_display_on_osd.value == "2": # Mins Secs
if self.type == self.TYPE_LENGTH:
return sign_l + "%d:%02d" % (l/60, l%60)
elif self.type == self.TYPE_POSITION:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
return sign_p + "%d:%02d" % (p/60, p%60)
elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
return sign_p + "%d:%02d " % (p/60, p%60) + sign_r + "%d:%02d" % (r/60, r%60)
elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
return sign_r + "%d:%02d " % (r/60, r%60) + sign_p + "%d:%02d" % (p/60, p%60)
else:
return sign_r + "%d:%02d" % (r/60, r%60)
elif self.type == self.TYPE_REMAINING:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
return sign_p + "%d:%02d" % (p/60, p%60)
elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
return ""
else:
return sign_r + "%d:%02d" % (r/60, r%60)
elif config.usage.swap_media_time_display_on_osd.value == "3": # Hours Mins
if self.type == self.TYPE_LENGTH:
return sign_l + "%d:%02d" % (l/3600, l%3600/60)
elif self.type == self.TYPE_POSITION:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
return sign_p + "%d:%02d" % (p/3600, p%3600/60)
elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
return sign_p + "%d:%02d " % (p/3600, p%3600/60) + sign_r + "%d:%02d" % (r/3600, r%3600/60)
elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
return sign_r + "%d:%02d " % (r/3600, r%3600/60) + sign_p + "%d:%02d" % (p/3600, p%3600/60)
else:
return sign_r + "%d:%02d" % (r/3600, r%3600/60)
elif self.type == self.TYPE_REMAINING:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
return sign_p + "%d:%02d" % (p/3600, p%3600/60)
elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
return ""
else:
return sign_r + "%d:%02d" % (r/3600, r%3600/60)
elif config.usage.swap_media_time_display_on_osd.value == "4": # Hours Mins Secs
if self.type == self.TYPE_LENGTH:
return sign_l + "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
elif self.type == self.TYPE_POSITION:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
return sign_p + "%d:%02d:%02d " % (p/3600, p%3600/60, p%60) + sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
return sign_r + "%d:%02d:%02d " % (r/3600, r%3600/60, r%60) + sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
else:
return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
elif self.type == self.TYPE_REMAINING:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
return ""
else:
return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
elif config.usage.swap_media_time_display_on_osd.value == "5": # Percentage
if self.type == self.TYPE_LENGTH:
return sign_l + "%d:%02d" % (l/3600, l%3600/60)
elif self.type == self.TYPE_POSITION:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
try:
return sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
elif config.usage.swap_time_remaining_on_osd.value == "2": # Elapsed & Remaining
try:
return sign_p + "%d%% " % ((float(p + 0.0) / float(l + 0.0)) * 100) + sign_r + "%d%%" % ((float(r + 0.0) / float(l + 0.0)) * 100 + 1)
except:
return ""
elif config.usage.swap_time_remaining_on_osd.value == "3": # Remaining & Elapsed
try:
return sign_r + "%d%% " % ((float(r + 0.0) / float(l + 0.0)) * 100 +1 ) + sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
else:
try:
return sign_r + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
elif self.type == self.TYPE_REMAINING:
if config.usage.swap_time_remaining_on_osd.value == "1": # Elapsed
try:
return sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
elif config.usage.swap_time_remaining_on_osd.value == "2" or config.usage.swap_time_remaining_on_osd.value == "3": # Elapsed & Remaining
return ""
else:
try:
return sign_r + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
else: # Skin Setting
if not self.detailed:
if self.showHours:
if self.showNoSeconds:
if self.type == self.TYPE_LENGTH:
return sign_l + "%d:%02d" % (l/3600, l%3600/60)
elif self.type == self.TYPE_POSITION:
return sign_p + "%d:%02d" % (p/3600, p%3600/60)
elif self.type == self.TYPE_REMAINING:
return sign_r + "%d:%02d" % (r/3600, r%3600/60)
else:
if self.type == self.TYPE_LENGTH:
return sign_l + "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
elif self.type == self.TYPE_POSITION:
return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
elif self.type == self.TYPE_REMAINING:
return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
else:
if self.showNoSeconds:
if self.type == self.TYPE_LENGTH:
return ngettext("%d Min", "%d Mins", (l/60)) % (l/60)
elif self.type == self.TYPE_POSITION:
return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
elif self.type == self.TYPE_REMAINING and self.OnlyMinute:
return ngettext("%d", "%d", (r/60)) % (r/60)
elif self.type == self.TYPE_REMAINING:
return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
else:
if self.type == self.TYPE_LENGTH:
return sign_l + "%d:%02d" % (l/60, l%60)
elif self.type == self.TYPE_POSITION:
return sign_p + "%d:%02d" % (p/60, p%60)
elif self.type == self.TYPE_REMAINING:
return sign_r + "%d:%02d" % (r/60, r%60)
else:
if self.showHours:
if self.type == self.TYPE_LENGTH:
return sign_l + "%d:%02d:%02d:%03d" % ((l/3600/90000), (l/90000)%3600/60, (l/90000)%60, (l%90000)/90)
						elif self.type == self.TYPE_POSITION:
							return sign_p + "%d:%02d:%02d:%03d" % ((p/3600/90000), (p/90000)%3600/60, (p/90000)%60, (p%90000)/90)
						elif self.type == self.TYPE_REMAINING:
							return sign_r + "%d:%02d:%02d:%03d" % ((r/3600/90000), (r/90000)%3600/60, (r/90000)%60, (r%90000)/90)
else:
if self.type == self.TYPE_LENGTH:
return sign_l + "%d:%02d:%03d" % ((l/60/90000), (l/90000)%60, (l%90000)/90)
elif self.type == self.TYPE_POSITION:
return sign_p + "%d:%02d:%03d" % ((p/60/90000), (p/90000)%60, (p%90000)/90)
elif self.type == self.TYPE_REMAINING:
return sign_r + "%d:%02d:%03d" % ((r/60/90000), (r/90000)%60, (r%90000)/90)
else:
if config.usage.elapsed_time_positive_vfd.value:
sign_p = "+"
sign_r = "-"
else:
sign_p = "-"
sign_r = "+"
if config.usage.swap_media_time_display_on_vfd.value == "1": # Mins
if self.type == self.TYPE_VFD_LENGTH:
return ngettext("%d Min", "%d Mins", (l/60)) % (l/60)
elif self.type == self.TYPE_VFD_POSITION:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
return sign_p + "%d " % (p/60) + sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
return sign_r + "%d " % (r/60) + sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
else:
return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
elif self.type == self.TYPE_VFD_REMAINING:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
return ""
else:
return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
elif config.usage.swap_media_time_display_on_vfd.value == "2": # Mins Secs
if self.type == self.TYPE_VFD_LENGTH:
return sign_l + "%d:%02d" % (l/60, l%60)
elif self.type == self.TYPE_VFD_POSITION:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
return sign_p + "%d:%02d" % (p/60, p%60)
elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
return sign_p + "%d:%02d " % (p/60, p%60) + sign_r + "%d:%02d" % (r/60, r%60)
elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
return sign_r + "%d:%02d " % (r/60, r%60) + sign_p + "%d:%02d" % (p/60, p%60)
else:
return sign_r + "%d:%02d" % (r/60, r%60)
elif self.type == self.TYPE_VFD_REMAINING:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
return sign_p + "%d:%02d" % (p/60, p%60)
elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
return ""
else:
return sign_r + "%d:%02d" % (r/60, r%60)
elif config.usage.swap_media_time_display_on_vfd.value == "3": # Hours Mins
if self.type == self.TYPE_VFD_LENGTH:
return sign_l + "%d:%02d" % (l/3600, l%3600/60)
elif self.type == self.TYPE_VFD_POSITION:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
return sign_p + "%d:%02d" % (p/3600, p%3600/60)
elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
return sign_p + "%d:%02d " % (p/3600, p%3600/60) + sign_r + "%d:%02d" % (r/3600, r%3600/60)
elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
return sign_r + "%d:%02d " % (r/3600, r%3600/60) + sign_p + "%d:%02d" % (p/3600, p%3600/60)
else:
return sign_r + "%d:%02d" % (r/3600, r%3600/60)
elif self.type == self.TYPE_VFD_REMAINING:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
return sign_p + "%d:%02d" % (p/3600, p%3600/60)
elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
return ""
else:
return sign_r + "%d:%02d" % (r/3600, r%3600/60)
elif config.usage.swap_media_time_display_on_vfd.value == "4": # Hours Mins Secs
if self.type == self.TYPE_VFD_LENGTH:
return sign_l + "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
elif self.type == self.TYPE_VFD_POSITION:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
return sign_p + "%d:%02d:%02d " % (p/3600, p%3600/60, p%60) + sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
return sign_r + "%d:%02d:%02d " % (r/3600, r%3600/60, r%60) + sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
else:
return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
elif self.type == self.TYPE_VFD_REMAINING:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
return ""
else:
return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
elif config.usage.swap_media_time_display_on_vfd.value == "5": # Percentage
if self.type == self.TYPE_VFD_LENGTH:
return sign_l + "%d:%02d" % (l/3600, l%3600/60)
elif self.type == self.TYPE_VFD_POSITION:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
try:
return sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
elif config.usage.swap_time_remaining_on_vfd.value == "2": # Elapsed & Remaining
try:
return sign_p + "%d%% " % ((float(p + 0.0) / float(l + 0.0)) * 100) + sign_r + "%d%%" % ((float(r + 0.0) / float(l + 0.0)) * 100 + 1)
except:
return ""
elif config.usage.swap_time_remaining_on_vfd.value == "3": # Remaining & Elapsed
try:
return sign_r + "%d%% " % ((float(r + 0.0) / float(l + 0.0)) * 100 +1 ) + sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
else:
try:
return sign_r + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
elif self.type == self.TYPE_VFD_REMAINING:
if config.usage.swap_time_remaining_on_vfd.value == "1": # Elapsed
try:
return sign_p + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
elif config.usage.swap_time_remaining_on_vfd.value == "2" or config.usage.swap_time_remaining_on_vfd.value == "3": # Elapsed & Remaining
return ""
else:
try:
return sign_r + "%d%%" % ((float(p + 0.0) / float(l + 0.0)) * 100)
except:
return ""
else: # Skin Setting
if not self.detailed:
if self.showHours:
if self.showNoSeconds:
if self.type == self.TYPE_VFD_LENGTH:
return sign_l + "%d:%02d" % (l/3600, l%3600/60)
elif self.type == self.TYPE_VFD_POSITION:
return sign_p + "%d:%02d" % (p/3600, p%3600/60)
							elif self.type == self.TYPE_VFD_REMAINING:
return sign_r + "%d:%02d" % (r/3600, r%3600/60)
else:
if self.type == self.TYPE_VFD_LENGTH:
return sign_l + "%d:%02d:%02d" % (l/3600, l%3600/60, l%60)
elif self.type == self.TYPE_VFD_POSITION:
return sign_p + "%d:%02d:%02d" % (p/3600, p%3600/60, p%60)
							elif self.type == self.TYPE_VFD_REMAINING:
return sign_r + "%d:%02d:%02d" % (r/3600, r%3600/60, r%60)
else:
if self.showNoSeconds:
if self.type == self.TYPE_VFD_LENGTH:
return ngettext("%d Min", "%d Mins", (l/60)) % (l/60)
elif self.type == self.TYPE_VFD_POSITION:
return sign_p + ngettext("%d Min", "%d Mins", (p/60)) % (p/60)
elif self.type == self.TYPE_VFD_REMAINING:
return sign_r + ngettext("%d Min", "%d Mins", (r/60)) % (r/60)
else:
if self.type == self.TYPE_VFD_LENGTH:
return sign_l + "%d:%02d" % (l/60, l%60)
elif self.type == self.TYPE_VFD_POSITION:
return sign_p + "%d:%02d" % (p/60, p%60)
							elif self.type == self.TYPE_VFD_REMAINING:
return sign_r + "%d:%02d" % (r/60, r%60)
else:
if self.showHours:
if self.type == self.TYPE_VFD_LENGTH:
return sign_l + "%d:%02d:%02d:%03d" % ((l/3600/90000), (l/90000)%3600/60, (l/90000)%60, (l%90000)/90)
						elif self.type == self.TYPE_VFD_POSITION:
							return sign_p + "%d:%02d:%02d:%03d" % ((p/3600/90000), (p/90000)%3600/60, (p/90000)%60, (p%90000)/90)
						elif self.type == self.TYPE_VFD_REMAINING:
							return sign_r + "%d:%02d:%02d:%03d" % ((r/3600/90000), (r/90000)%3600/60, (r/90000)%60, (r%90000)/90)
else:
if self.type == self.TYPE_VFD_LENGTH:
return sign_l + "%d:%02d:%03d" % ((l/60/90000), (l/90000)%60, (l%90000)/90)
elif self.type == self.TYPE_VFD_POSITION:
return sign_p + "%d:%02d:%03d" % ((p/60/90000), (p/90000)%60, (p%90000)/90)
						elif self.type == self.TYPE_VFD_REMAINING:
return sign_r + "%d:%02d:%03d" % ((r/60/90000), (r/90000)%60, (r%90000)/90)
# range/value are for the Progress renderer
range = 10000
@cached
def getValue(self):
pos = self.position
len = self.length
if pos is None or len is None or len <= 0:
return None
return pos * 10000 / len
position = property(getPosition)
length = property(getLength)
cutlist = property(getCutlist)
text = property(getText)
value = property(getValue)
def changed(self, what):
cutlist_refresh = what[0] != self.CHANGED_SPECIFIC or what[1] in (iPlayableService.evCuesheetChanged,)
time_refresh = what[0] == self.CHANGED_POLL or what[0] == self.CHANGED_SPECIFIC and what[1] in (iPlayableService.evCuesheetChanged,)
if cutlist_refresh:
if self.type == self.TYPE_GAUGE:
self.downstream_elements.cutlist_changed()
if time_refresh:
self.downstream_elements.changed(what)
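# The branches in getText() repeat the same 90 kHz PTS arithmetic. A
# self-contained sketch of that conversion (illustrative helper, not part of
# the converter's API; Python 2 integer division, as used above):
def _format_pts(pts, detailed=False):
	seconds = pts / 90000
	if detailed:
		return "%d:%02d:%02d:%03d" % (seconds / 3600, seconds % 3600 / 60, seconds % 60, (pts % 90000) / 90)
	return "%d:%02d:%02d" % (seconds / 3600, seconds % 3600 / 60, seconds % 60)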
| gpl-2.0 |
DARKPOP/external_chromium_org | chrome/tools/webforms_extractor.py | 185 | 10187 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts registration forms from the corresponding HTML files.
Used for extracting forms within HTML files. This script is used in
conjunction with the webforms_aggregator.py script, which aggregates web pages
with fillable forms (i.e registration forms).
The purpose of this script is to extract out all non-form elements that may be
causing parsing errors and timeout issues when running browser_tests.
This script extracts all forms from a HTML file.
If there are multiple forms per downloaded site, multiple files are created
for each form.
Used as a standalone script but assumes that it is run from the directory in
which it is checked into.
Usage: forms_extractor.py [options]
Options:
-l LOG_LEVEL, --log_level=LOG_LEVEL,
LOG_LEVEL: debug, info, warning or error [default: error]
-j, --js extracts javascript elements from web form.
-h, --help show this help message and exit
"""
import glob
import logging
from optparse import OptionParser
import os
import re
import sys
class FormsExtractor(object):
"""Extracts HTML files, leaving only registration forms from the HTML file."""
_HTML_FILES_PATTERN = r'*.html'
_HTML_FILE_PREFIX = r'grabber-'
_FORM_FILE_PREFIX = r'grabber-stripped-'
_REGISTRATION_PAGES_DIR = os.path.join(os.pardir, 'test', 'data', 'autofill',
'heuristics', 'input')
_EXTRACTED_FORMS_DIR = os.path.join(os.pardir, 'test', 'data', 'autofill',
'heuristics', 'input')
logger = logging.getLogger(__name__)
log_handlers = {'StreamHandler': None}
# This pattern is used for retrieving the form location comment located at the
# top of each downloaded HTML file indicating where the form originated from.
_RE_FORM_LOCATION_PATTERN = re.compile(
ur"""
<!--Form\s{1}Location: # Starting of form location comment.
.*? # Any characters (non-greedy).
--> # Ending of the form comment.
""", re.U | re.S | re.I | re.X)
# This pattern is used for removing all script code.
_RE_SCRIPT_PATTERN = re.compile(
ur"""
<script # A new opening '<script' tag.
\b # The end of the word 'script'.
.*? # Any characters (non-greedy).
> # Ending of the (opening) tag: '>'.
.*? # Any characters (non-greedy) between the tags.
</script\s*> # The '</script>' closing tag.
""", re.U | re.S | re.I | re.X)
# This pattern is used for removing all href js code.
_RE_HREF_JS_PATTERN = re.compile(
ur"""
\bhref # The word href and its beginning.
\s*=\s* # The '=' with all whitespace before and after it.
(?P<quote>[\'\"]) # A single or double quote which is captured.
\s*javascript\s*: # The word 'javascript:' with any whitespace possible.
.*? # Any characters (non-greedy) between the quotes.
\1 # The previously captured single or double quote.
""", re.U | re.S | re.I | re.X)
_RE_EVENT_EXPR = (
ur"""
\b # The beginning of a new word.
on\w+? # All words starting with 'on' (non-greedy)
# example: |onmouseover|.
\s*=\s* # The '=' with all whitespace before and after it.
(?P<quote>[\'\"]) # A captured single or double quote.
.*? # Any characters (non-greedy) between the quotes.
\1 # The previously captured single or double quote.
""")
# This pattern is used for removing code with js events, such as |onload|.
# By adding the leading |ur'<[^<>]*?'| and the trailing |'ur'[^<>]*?>'| the
# pattern matches to strings such as '<tr class="nav"
# onmouseover="mOvr1(this);" onmouseout="mOut1(this);">'
_RE_TAG_WITH_EVENTS_PATTERN = re.compile(
ur"""
< # Matches character '<'.
[^<>]*? # Matches any characters except '<' and '>' (non-greedy).""" +
_RE_EVENT_EXPR +
ur"""
[^<>]*? # Matches any characters except '<' and '>' (non-greedy).
> # Matches character '>'.
""", re.U | re.S | re.I | re.X)
# Adds whitespace chars at the end of the matched event. Also match trailing
# whitespaces for JS events. Do not match leading whitespace.
# For example: |< /form>| is invalid HTML and does not exist but |</form >| is
# considered valid HTML.
_RE_EVENT_PATTERN = re.compile(
_RE_EVENT_EXPR + ur'\s*', re.U | re.S | re.I | re.X)
# This pattern is used for finding form elements.
_RE_FORM_PATTERN = re.compile(
ur"""
<form # A new opening '<form' tag.
\b # The end of the word 'form'.
.*? # Any characters (non-greedy).
> # Ending of the (opening) tag: '>'.
.*? # Any characters (non-greedy) between the tags.
</form\s*> # The '</form>' closing tag.
""", re.U | re.S | re.I | re.X)
def __init__(self, input_dir=_REGISTRATION_PAGES_DIR,
output_dir=_EXTRACTED_FORMS_DIR, logging_level=None):
"""Creates a FormsExtractor object.
Args:
input_dir: the directory of HTML files.
output_dir: the directory where the registration form files will be
saved.
logging_level: verbosity level, default is None.
Raises:
IOError exception if input directory doesn't exist.
"""
if logging_level:
if not self.log_handlers['StreamHandler']:
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
self.log_handlers['StreamHandler'] = console
self.logger.addHandler(console)
self.logger.setLevel(logging_level)
else:
if self.log_handlers['StreamHandler']:
self.logger.removeHandler(self.log_handlers['StreamHandler'])
self.log_handlers['StreamHandler'] = None
self._input_dir = input_dir
self._output_dir = output_dir
if not os.path.isdir(self._input_dir):
error_msg = 'Directory "%s" doesn\'t exist.' % self._input_dir
self.logger.error('Error: %s', error_msg)
raise IOError(error_msg)
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
self._form_location_comment = ''
def _SubstituteAllEvents(self, matchobj):
"""Remove all js events that are present as attributes within a tag.
Args:
matchobj: A regexp |re.MatchObject| containing text that has at least one
event. Example: |<tr class="nav" onmouseover="mOvr1(this);"
onmouseout="mOut1(this);">|.
Returns:
The text containing the tag with all the attributes except for the tags
with events. Example: |<tr class="nav">|.
"""
tag_with_all_attrs = matchobj.group(0)
return self._RE_EVENT_PATTERN.sub('', tag_with_all_attrs)
def Extract(self, strip_js_only):
"""Extracts and saves the extracted registration forms.
Iterates through all the HTML files.
Args:
strip_js_only: If True, only Javascript is stripped from the HTML content.
Otherwise, all non-form elements are stripped.
"""
pathname_pattern = os.path.join(self._input_dir, self._HTML_FILES_PATTERN)
html_files = [f for f in glob.glob(pathname_pattern) if os.path.isfile(f)]
for filename in html_files:
self.logger.info('Stripping file "%s" ...', filename)
with open(filename, 'U') as f:
html_content = self._RE_TAG_WITH_EVENTS_PATTERN.sub(
self._SubstituteAllEvents,
self._RE_HREF_JS_PATTERN.sub(
'', self._RE_SCRIPT_PATTERN.sub('', f.read())))
form_filename = os.path.split(filename)[1] # Path dropped.
form_filename = form_filename.replace(self._HTML_FILE_PREFIX, '', 1)
(form_filename, extension) = os.path.splitext(form_filename)
form_filename = (self._FORM_FILE_PREFIX + form_filename +
'%s' + extension)
form_filename = os.path.join(self._output_dir, form_filename)
if strip_js_only:
form_filename = form_filename % ''
try:
with open(form_filename, 'w') as f:
f.write(html_content)
except IOError as e:
self.logger.error('Error: %s', e)
continue
else: # Remove all non form elements.
match = self._RE_FORM_LOCATION_PATTERN.search(html_content)
if match:
form_location_comment = match.group() + os.linesep
else:
form_location_comment = ''
forms_iterator = self._RE_FORM_PATTERN.finditer(html_content)
for form_number, form_match in enumerate(forms_iterator, start=1):
form_content = form_match.group()
numbered_form_filename = form_filename % form_number
try:
with open(numbered_form_filename, 'w') as f:
f.write(form_location_comment)
f.write(form_content)
except IOError as e:
self.logger.error('Error: %s', e)
continue
self.logger.info('\tFile "%s" extracted SUCCESSFULLY!', filename)
def main():
parser = OptionParser()
parser.add_option(
'-l', '--log_level', metavar='LOG_LEVEL', default='error',
help='LOG_LEVEL: debug, info, warning or error [default: %default]')
parser.add_option(
'-j', '--js', dest='js', action='store_true', default=False,
help='Removes all javascript elements [default: %default]')
(options, args) = parser.parse_args()
options.log_level = options.log_level.upper()
if options.log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR']:
print 'Wrong log_level argument.'
parser.print_help()
return 1
options.log_level = getattr(logging, options.log_level)
extractor = FormsExtractor(logging_level=options.log_level)
extractor.Extract(options.js)
return 0
if __name__ == '__main__':
sys.exit(main())
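# Typical invocations (run from the directory this script is checked into):
#
#   ./webforms_extractor.py --log_level debug   # extract forms, log verbosely
#   ./webforms_extractor.py -j                  # strip javascript only
#
# Programmatic use is equivalent to:
#
#   extractor = FormsExtractor(logging_level=logging.DEBUG)
#   extractor.Extract(strip_js_only=False)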
| bsd-3-clause |
pyGrowler/Growler | tests/utils.py | 2 | 1128 | #
# tests/utils
#
"""
Useful functions for all tests
"""
import asyncio
import pytest
from growler.aio.http_protocol import GrowlerHTTPProtocol
import growler
def random_port():
from random import randint
return randint(1024, 2**16)
@asyncio.coroutine
def setup_test_server(unused_tcp_port, event_loop):
"""
Sets up a GrowlerProtocol server for testing
"""
# proto = growler.protocol.GrowlerProtocol
proto = TestProtocol
server = yield from event_loop.create_server(proto, '127.0.0.1', unused_tcp_port)
return server, unused_tcp_port
@asyncio.coroutine
def setup_http_server(loop, port):
"""
Sets up a GrowlerHTTPProtocol server for testing
"""
# proto = growler.protocol.GrowlerHTTPProtocol
app = growler.App()
def proto():
return GrowlerHTTPProtocol(app)
return (yield from loop.create_server(proto, '127.0.0.1', port))
def teardown_server(server, loop=None):
    """
    'Generic' tear down a server and wait on the loop for everything to close.
    """
    # Resolve the default loop at call time; a default argument of
    # asyncio.get_event_loop() would be bound once at import.
    if loop is None:
        loop = asyncio.get_event_loop()
    server.close()
    loop.run_until_complete(server.wait_closed())
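# Sketch of how these helpers compose in a test. 'unused_tcp_port' and
# 'event_loop' are assumed to be the usual pytest-asyncio fixtures; the test
# body itself is hypothetical.
#
#     @pytest.mark.asyncio
#     @asyncio.coroutine
#     def test_server_starts(unused_tcp_port, event_loop):
#         server, port = yield from setup_test_server(unused_tcp_port, event_loop)
#         teardown_server(server, event_loop)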
| apache-2.0 |
nirzari18/Query-Analysis-Application-on-Google-App-Engine | lib/simplejson/decoder.py | 132 | 14721 | """Implementation of JSONDecoder
"""
from __future__ import absolute_import
import re
import sys
import struct
from .compat import fromhex, b, u, text_type, binary_type, PY3, unichr
from .scanner import make_scanner, JSONDecodeError
def _import_c_scanstring():
try:
from ._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
# NOTE (3.1.0): JSONDecodeError may still be imported from this module for
# compatibility, but it was never in the __all__
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = fromhex('7FF80000000000007FF0000000000000')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u('"'), '\\': u('\u005c'), '/': u('/'),
'b': u('\b'), 'f': u('\f'), 'n': u('\n'), 'r': u('\r'), 't': u('\t'),
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
_b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
_PY3=PY3, _maxunicode=sys.maxunicode):
"""Scan the string s for a JSON string. End is the index of the
character in s after the quote that started the JSON string.
Unescapes all valid JSON string escape sequences and raises ValueError
on attempt to decode an invalid string. If strict is False then literal
control characters are allowed in the string.
Returns a tuple of the decoded string and the index of the character in s
after the end quote."""
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
end = chunk.end()
content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
if content:
if not _PY3 and not isinstance(content, text_type):
content = text_type(content, encoding)
_append(content)
# Terminator is the end of string, a literal control character,
# or a backslash denoting that an escape sequence follows
if terminator == '"':
break
elif terminator != '\\':
if strict:
msg = "Invalid control character %r at"
raise JSONDecodeError(msg, s, end)
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise JSONDecodeError(
"Unterminated string starting at", s, begin)
# If not a unicode escape sequence, must be in the lookup table
if esc != 'u':
try:
char = _b[esc]
except KeyError:
msg = "Invalid \\X escape sequence %r"
raise JSONDecodeError(msg, s, end)
end += 1
else:
# Unicode escape sequence
msg = "Invalid \\uXXXX escape sequence"
esc = s[end + 1:end + 5]
escX = esc[1:2]
if len(esc) != 4 or escX == 'x' or escX == 'X':
raise JSONDecodeError(msg, s, end - 1)
try:
uni = int(esc, 16)
except ValueError:
raise JSONDecodeError(msg, s, end - 1)
end += 5
# Check for surrogate pair on UCS-4 systems
# Note that this will join high/low surrogate pairs
# but will also pass unpaired surrogates through
if (_maxunicode > 65535 and
uni & 0xfc00 == 0xd800 and
s[end:end + 2] == '\\u'):
esc2 = s[end + 2:end + 6]
escX = esc2[1:2]
if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
try:
uni2 = int(esc2, 16)
except ValueError:
raise JSONDecodeError(msg, s, end)
if uni2 & 0xfc00 == 0xdc00:
uni = 0x10000 + (((uni - 0xd800) << 10) |
(uni2 - 0xdc00))
end += 6
char = unichr(uni)
# Append the unescaped character
_append(char)
return _join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
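# Example: 'end' is the index just past the opening quote; the result pairs
# the decoded text with the index just past the closing quote.
#
#     py_scanstring('"abc"', 1)    # -> (u'abc', 5)
#     py_scanstring('"a\\nb"', 1)  # -> (u'a\nb', 6)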
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject(state, encoding, strict, scan_once, object_hook,
object_pairs_hook, memo=None,
_w=WHITESPACE.match, _ws=WHITESPACE_STR):
(s, end) = state
# Backwards compatibility
if memo is None:
memo = {}
memo_get = memo.setdefault
pairs = []
# Use a slice to prevent IndexError from being raised, the following
# check will raise a more specific ValueError if the string is empty
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end + 1
pairs = {}
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end + 1
elif nextchar != '"':
raise JSONDecodeError(
"Expecting property name enclosed in double quotes",
s, end)
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
key = memo_get(key, key)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise JSONDecodeError("Expecting ':' delimiter", s, end)
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
value, end = scan_once(s, end)
pairs.append((key, value))
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting ',' delimiter or '}'", s, end - 1)
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise JSONDecodeError(
"Expecting property name enclosed in double quotes",
s, end - 1)
if object_pairs_hook is not None:
result = object_pairs_hook(pairs)
return result, end
pairs = dict(pairs)
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray(state, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
(s, end) = state
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
elif nextchar == '':
raise JSONDecodeError("Expecting value or ']'", s, end)
_append = values.append
while True:
value, end = scan_once(s, end)
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise JSONDecodeError("Expecting ',' delimiter or ']'", s, end - 1)
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
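# Added note (illustrative): JSONObject and JSONArray receive ``(s, end)``
# where ``end`` is the index just past the opening '{' or '['; the scanner
# dispatches here after consuming that character, so parsing '{"k": 1}'
# enters JSONObject with state=(s, 1).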
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | str, unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True,
object_pairs_hook=None):
"""
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
        Note that currently only encodings that are a superset of ASCII work;
        strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
*strict* controls the parser's behavior when it encounters an
invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors;
        if ``False``, control characters are allowed in strings.
"""
if encoding is None:
encoding = DEFAULT_ENCODING
self.encoding = encoding
self.object_hook = object_hook
self.object_pairs_hook = object_pairs_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.memo = {}
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match, _PY3=PY3):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
if _PY3 and isinstance(s, binary_type):
s = s.decode(self.encoding)
obj, end = self.raw_decode(s)
end = _w(s, end).end()
if end != len(s):
raise JSONDecodeError("Extra data", s, end, len(s))
return obj
def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode``
beginning with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
Optionally, ``idx`` can be used to specify an offset in ``s`` where
the JSON document begins.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
if idx < 0:
# Ensure that raw_decode bails on negative indexes, the regex
# would otherwise mask this behavior. #98
raise JSONDecodeError('Expecting value', s, idx)
if _PY3 and not isinstance(s, text_type):
raise TypeError("Input string must be text, not bytes")
# strip UTF-8 bom
if len(s) > idx:
ord0 = ord(s[idx])
if ord0 == 0xfeff:
idx += 1
elif ord0 == 0xef and s[idx:idx + 3] == '\xef\xbb\xbf':
idx += 3
return self.scan_once(s, idx=_w(s, idx).end())
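# Hedged usage sketch (added for illustration): decode() requires the whole
# string to be a single JSON document, while raw_decode() tolerates trailing
# data and reports where the document ended:
#
#     JSONDecoder().decode('{"a": 1}')           ->  {'a': 1}
#     JSONDecoder().raw_decode('{"a": 1} tail')  ->  ({'a': 1}, 8)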
| apache-2.0 |
classcat/cctf | cctf/layers/normalization.py | 1 | 5969 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import tensorflow as tf
from tensorflow.python.training import moving_averages
# masao
import cctf
#import tflearn
from .. import utils
from .. import variables as vs
def batch_normalization(incoming, beta=0.0, gamma=1.0, epsilon=1e-5,
decay=0.9, stddev=0.002, trainable=True,
restore=True, reuse=False, scope=None,
name="BatchNormalization"):
""" Batch Normalization.
Normalize activations of the previous layer at each batch.
Arguments:
incoming: `Tensor`. Incoming Tensor.
beta: `float`. Default: 0.0.
gamma: `float`. Default: 1.0.
        epsilon: `float`. Default: 1e-5.
decay: `float`. Default: 0.9.
stddev: `float`. Standard deviation for weights initialization.
trainable: `bool`. If True, weights will be trainable.
restore: `bool`. If True, this layer weights will be restored when
loading a model.
reuse: `bool`. If True and 'scope' is provided, this layer variables
will be reused (shared).
scope: `str`. Define this layer scope (optional). A scope can be
used to share variables between layers. Note that scope will
override name.
name: `str`. A name for this layer (optional).
References:
Batch Normalization: Accelerating Deep Network Training by Reducing
        Internal Covariate Shift. Sergey Ioffe, Christian Szegedy. 2015.
Links:
[http://arxiv.org/pdf/1502.03167v3.pdf](http://arxiv.org/pdf/1502.03167v3.pdf)
"""
input_shape = utils.get_incoming_shape(incoming)
input_ndim = len(input_shape)
gamma_init = tf.random_normal_initializer(mean=gamma, stddev=stddev)
with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
name = scope.name
beta = vs.variable('beta', shape=[input_shape[-1]],
initializer=tf.constant_initializer(beta),
trainable=trainable, restore=restore)
gamma = vs.variable('gamma', shape=[input_shape[-1]],
initializer=gamma_init, trainable=trainable,
restore=restore)
# Track per layer variables
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, beta)
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, gamma)
if not restore:
tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, beta)
tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, gamma)
axis = list(range(input_ndim - 1))
moving_mean = vs.variable('moving_mean',
input_shape[-1:],
initializer=tf.zeros_initializer,
trainable=False,
restore=restore)
moving_variance = vs.variable('moving_variance',
input_shape[-1:],
initializer=tf.ones_initializer,
trainable=False,
restore=restore)
# Define a function to update mean and variance
def update_mean_var():
mean, variance = tf.nn.moments(incoming, axis)
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
with tf.control_dependencies(
[update_moving_mean, update_moving_variance]):
return tf.identity(mean), tf.identity(variance)
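        # Added note: in training mode the function above refreshes the moving
        # statistics and normalization uses the current batch's mean/variance;
        # at inference time the stored moving averages are used instead.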
# Retrieve variable managing training mode
        is_training = cctf.get_training_mode()  # tflearn is not imported here; cctf is (see imports above)
mean, var = tf.python.control_flow_ops.cond(
is_training, update_mean_var, lambda: (moving_mean, moving_variance))
try:
inference = tf.nn.batch_normalization(
incoming, mean, var, beta, gamma, epsilon)
inference.set_shape(input_shape)
# Fix for old Tensorflow
except Exception as e:
inference = tf.nn.batch_norm_with_global_normalization(
incoming, mean, var, beta, gamma, epsilon,
scale_after_normalization=True,
)
inference.set_shape(input_shape)
# Add attributes for easy access
inference.scope = scope
inference.beta = beta
inference.gamma = gamma
# Track output tensor.
tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)
return inference
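# Hedged usage sketch (added; the upstream layer name is an assumption, not
# part of this file):
#
#     net = some_conv_layer(inputs)   # hypothetical preceding layer
#     net = batch_normalization(net, decay=0.99, name="bn1")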
def local_response_normalization(incoming, depth_radius=5, bias=1.0,
alpha=0.0001, beta=0.75,
name="LocalResponseNormalization"):
""" Local Response Normalization.
Input:
4-D Tensor Layer.
Output:
4-D Tensor Layer. (Same dimension as input).
Arguments:
incoming: `Tensor`. Incoming Tensor.
depth_radius: `int`. 0-D. Half-width of the 1-D normalization window.
Defaults to 5.
bias: `float`. An offset (usually positive to avoid dividing by 0).
Defaults to 1.0.
alpha: `float`. A scale factor, usually positive. Defaults to 0.0001.
        beta: `float`. An exponent. Defaults to `0.75`.
name: `str`. A name for this layer (optional).
"""
with tf.name_scope(name) as scope:
inference = tf.nn.lrn(incoming, depth_radius=depth_radius,
bias=bias, alpha=alpha,
beta=beta, name=name)
inference.scope = scope
# Track output tensor.
tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)
return inference
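# Added reference note: tf.nn.lrn implements the standard formula
#     output = input / (bias + alpha * sum_window(input ** 2)) ** beta
# where the squared sum runs over a window of 2 * depth_radius + 1 adjacent
# channels along the last axis.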
| agpl-3.0 |
sogis/Quantum-GIS | python/ext-libs/jinja2/testsuite/security.py | 415 | 6204 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.security
~~~~~~~~~~~~~~~~~~~~~~~~~
Checks the sandbox and other security features.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment
from jinja2.sandbox import SandboxedEnvironment, \
ImmutableSandboxedEnvironment, unsafe
from jinja2 import Markup, escape
from jinja2.exceptions import SecurityError, TemplateSyntaxError, \
TemplateRuntimeError
from jinja2._compat import text_type
class PrivateStuff(object):
def bar(self):
return 23
@unsafe
def foo(self):
return 42
def __repr__(self):
return 'PrivateStuff'
class PublicStuff(object):
bar = lambda self: 23
_foo = lambda self: 42
def __repr__(self):
return 'PublicStuff'
class SandboxTestCase(JinjaTestCase):
def test_unsafe(self):
env = SandboxedEnvironment()
self.assert_raises(SecurityError, env.from_string("{{ foo.foo() }}").render,
foo=PrivateStuff())
self.assert_equal(env.from_string("{{ foo.bar() }}").render(foo=PrivateStuff()), '23')
self.assert_raises(SecurityError, env.from_string("{{ foo._foo() }}").render,
foo=PublicStuff())
self.assert_equal(env.from_string("{{ foo.bar() }}").render(foo=PublicStuff()), '23')
self.assert_equal(env.from_string("{{ foo.__class__ }}").render(foo=42), '')
self.assert_equal(env.from_string("{{ foo.func_code }}").render(foo=lambda:None), '')
# security error comes from __class__ already.
self.assert_raises(SecurityError, env.from_string(
"{{ foo.__class__.__subclasses__() }}").render, foo=42)
def test_immutable_environment(self):
env = ImmutableSandboxedEnvironment()
self.assert_raises(SecurityError, env.from_string(
'{{ [].append(23) }}').render)
self.assert_raises(SecurityError, env.from_string(
'{{ {1:2}.clear() }}').render)
def test_restricted(self):
env = SandboxedEnvironment()
self.assert_raises(TemplateSyntaxError, env.from_string,
"{% for item.attribute in seq %}...{% endfor %}")
self.assert_raises(TemplateSyntaxError, env.from_string,
"{% for foo, bar.baz in seq %}...{% endfor %}")
def test_markup_operations(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
# string interpolations are safe to use too
        assert Markup('<em>%s</em>') % '<bad user>' == \
            '<em>&lt;bad user&gt;</em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
        } == '<em>&lt;bad user&gt;</em>'
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
# escaping and unescaping
        assert escape('"<>&\'') == '&#34;&lt;&gt;&amp;&#39;'
        assert Markup("<em>Foo &amp; Bar</em>").striptags() == "Foo & Bar"
        assert Markup("&lt;test&gt;").unescape() == "<test>"
def test_template_data(self):
env = Environment(autoescape=True)
t = env.from_string('{% macro say_hello(name) %}'
'<p>Hello {{ name }}!</p>{% endmacro %}'
'{{ say_hello("<blink>foo</blink>") }}')
        escaped_out = '<p>Hello &lt;blink&gt;foo&lt;/blink&gt;!</p>'
assert t.render() == escaped_out
assert text_type(t.module) == escaped_out
assert escape(t.module) == escaped_out
assert t.module.say_hello('<blink>foo</blink>') == escaped_out
assert escape(t.module.say_hello('<blink>foo</blink>')) == escaped_out
def test_attr_filter(self):
env = SandboxedEnvironment()
tmpl = env.from_string('{{ cls|attr("__subclasses__")() }}')
self.assert_raises(SecurityError, tmpl.render, cls=int)
def test_binary_operator_intercepting(self):
def disable_op(left, right):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('1 + 2', {}, '3'), ('a + 2', {'a': 2}, '4'):
env = SandboxedEnvironment()
env.binop_table['+'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_binops = frozenset(['+'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
self.fail('expected runtime error')
def test_unary_operator_intercepting(self):
def disable_op(arg):
raise TemplateRuntimeError('that operator so does not work')
for expr, ctx, rv in ('-1', {}, '-1'), ('-a', {'a': 2}, '-2'):
env = SandboxedEnvironment()
env.unop_table['-'] = disable_op
t = env.from_string('{{ %s }}' % expr)
assert t.render(ctx) == rv
env.intercepted_unops = frozenset(['-'])
t = env.from_string('{{ %s }}' % expr)
try:
t.render(ctx)
except TemplateRuntimeError as e:
pass
else:
self.fail('expected runtime error')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SandboxTestCase))
return suite
| gpl-2.0 |
shin-/compose | compose/cli/verbose_proxy.py | 22 | 1770 | from __future__ import absolute_import
from __future__ import unicode_literals
import functools
import logging
import pprint
from itertools import chain
import six
def format_call(args, kwargs):
args = (repr(a) for a in args)
kwargs = ("{0!s}={1!r}".format(*item) for item in six.iteritems(kwargs))
return "({0})".format(", ".join(chain(args, kwargs)))
def format_return(result, max_lines):
if isinstance(result, (list, tuple, set)):
return "({0} with {1} items)".format(type(result).__name__, len(result))
if result:
lines = pprint.pformat(result).split('\n')
extra = '\n...' if len(lines) > max_lines else ''
return '\n'.join(lines[:max_lines]) + extra
return result
class VerboseProxy(object):
"""Proxy all function calls to another class and log method name, arguments
and return values for each call.
"""
def __init__(self, obj_name, obj, log_name=None, max_lines=10):
self.obj_name = obj_name
self.obj = obj
self.max_lines = max_lines
self.log = logging.getLogger(log_name or __name__)
def __getattr__(self, name):
attr = getattr(self.obj, name)
if not six.callable(attr):
return attr
return functools.partial(self.proxy_callable, name)
def proxy_callable(self, call_name, *args, **kwargs):
self.log.info("%s %s <- %s",
self.obj_name,
call_name,
format_call(args, kwargs))
result = getattr(self.obj, call_name)(*args, **kwargs)
self.log.info("%s %s -> %s",
self.obj_name,
call_name,
format_return(result, self.max_lines))
return result
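# Hedged usage sketch (added; the wrapped client is a stand-in, not part of
# this module):
#
#     client = VerboseProxy('docker', docker_client, max_lines=5)
#     client.containers(all=True)
#     # logs "docker containers <- (all=True)" plus the formatted return value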
| apache-2.0 |
simonwydooghe/ansible | test/units/modules/storage/netapp/test_na_ontap_snapshot.py | 38 | 8659 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit tests ONTAP Ansible module: na_ontap_snapshot'''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_snapshot \
import NetAppOntapSnapshot as my_module
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
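# Added note: AnsibleModule reads its parameters from stdin as a JSON blob
# keyed by ANSIBLE_MODULE_ARGS; patching basic._ANSIBLE_ARGS lets the tests
# inject arguments without spawning a separate process.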
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None):
''' save arguments '''
self.type = kind
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'snapshot':
xml = self.build_snapshot_info()
elif self.type == 'snapshot_fail':
raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test")
self.xml_out = xml
return xml
@staticmethod
def build_snapshot_info():
''' build xml data for snapshot-info '''
xml = netapp_utils.zapi.NaElement('xml')
data = {'num-records': 1,
'attributes-list': {'snapshot-info': {'comment': 'new comment',
'name': 'ansible',
'snapmirror-label': 'label12'}}}
xml.translate_struct(data)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection()
self.onbox = False
def set_default_args(self):
if self.onbox:
hostname = '10.193.75.3'
username = 'admin'
password = 'netapp1!'
vserver = 'ansible'
volume = 'ansible'
snapshot = 'ansible'
comment = 'new comment'
snapmirror_label = 'label12'
else:
hostname = 'hostname'
username = 'username'
password = 'password'
vserver = 'vserver'
volume = 'ansible'
snapshot = 'ansible'
comment = 'new comment'
snapmirror_label = 'label12'
return dict({
'hostname': hostname,
'username': username,
'password': password,
'vserver': vserver,
'volume': volume,
'snapshot': snapshot,
'comment': comment,
'snapmirror_label': snapmirror_label
})
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
my_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_ensure_get_called(self):
''' test get_snapshot() for non-existent snapshot'''
set_module_args(self.set_default_args())
my_obj = my_module()
my_obj.server = self.server
assert my_obj.get_snapshot() is None
def test_ensure_get_called_existing(self):
''' test get_snapshot() for existing snapshot'''
set_module_args(self.set_default_args())
my_obj = my_module()
my_obj.server = MockONTAPConnection(kind='snapshot')
assert my_obj.get_snapshot()
@patch('ansible.modules.storage.netapp.na_ontap_snapshot.NetAppOntapSnapshot.create_snapshot')
def test_successful_create(self, create_snapshot):
''' creating snapshot and testing idempotency '''
set_module_args(self.set_default_args())
my_obj = my_module()
if not self.onbox:
my_obj.server = self.server
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert exc.value.args[0]['changed']
create_snapshot.assert_called_with()
# to reset na_helper from remembering the previous 'changed' value
my_obj = my_module()
if not self.onbox:
my_obj.server = MockONTAPConnection('snapshot')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert not exc.value.args[0]['changed']
@patch('ansible.modules.storage.netapp.na_ontap_snapshot.NetAppOntapSnapshot.modify_snapshot')
def test_successful_modify(self, modify_snapshot):
''' modifying snapshot and testing idempotency '''
data = self.set_default_args()
data['comment'] = 'adding comment'
data['snapmirror_label'] = 'label22'
set_module_args(data)
my_obj = my_module()
if not self.onbox:
my_obj.server = MockONTAPConnection('snapshot')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert exc.value.args[0]['changed']
modify_snapshot.assert_called_with()
# to reset na_helper from remembering the previous 'changed' value
data['comment'] = 'new comment'
data['snapmirror_label'] = 'label12'
set_module_args(data)
my_obj = my_module()
if not self.onbox:
my_obj.server = MockONTAPConnection('snapshot')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert not exc.value.args[0]['changed']
@patch('ansible.modules.storage.netapp.na_ontap_snapshot.NetAppOntapSnapshot.delete_snapshot')
def test_successful_delete(self, delete_snapshot):
''' deleting snapshot and testing idempotency '''
data = self.set_default_args()
data['state'] = 'absent'
set_module_args(data)
my_obj = my_module()
if not self.onbox:
my_obj.server = MockONTAPConnection('snapshot')
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert exc.value.args[0]['changed']
delete_snapshot.assert_called_with()
# to reset na_helper from remembering the previous 'changed' value
my_obj = my_module()
if not self.onbox:
my_obj.server = self.server
with pytest.raises(AnsibleExitJson) as exc:
my_obj.apply()
assert not exc.value.args[0]['changed']
def test_if_all_methods_catch_exception(self):
module_args = {}
module_args.update(self.set_default_args())
set_module_args(module_args)
my_obj = my_module()
if not self.onbox:
my_obj.server = MockONTAPConnection('snapshot_fail')
with pytest.raises(AnsibleFailJson) as exc:
my_obj.create_snapshot()
assert 'Error creating snapshot ansible:' in exc.value.args[0]['msg']
with pytest.raises(AnsibleFailJson) as exc:
my_obj.delete_snapshot()
assert 'Error deleting snapshot ansible:' in exc.value.args[0]['msg']
with pytest.raises(AnsibleFailJson) as exc:
my_obj.modify_snapshot()
assert 'Error modifying snapshot ansible:' in exc.value.args[0]['msg']
| gpl-3.0 |
ajtowns/bitcoin | test/util/bitcoin-util-test.py | 59 | 6594 | #!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for bitcoin utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
import binascii
import configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.optionxform = str
config.read_file(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
        # that stderr is empty if no errors are expected. However, bitcoin-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
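# Illustrative test-case shape (added comment; the values are made up, only
# the keys are meaningful to bctest above):
#
#     {
#         "exec": "./bitcoin-tx",
#         "args": ["-create"],
#         "output_cmp": "blanktx.hex",
#         "description": "creates a blank transaction"
#     }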
if __name__ == '__main__':
main()
| mit |
TemoaProject/temoa | temoa_model/get_comm_tech.py | 1 | 9277 | import sqlite3
import os
import sys
import getopt
import re
from collections import OrderedDict
def get_tperiods(inp_f):
file_ty = re.search(r"(\w+)\.(\w+)\b", inp_f) # Extract the input filename and extension
if not file_ty :
raise "The file type %s is not recognized." % inp_f
elif file_ty.group(2) not in ("db", "sqlite", "sqlite3", "sqlitedb"):
raise Exception("Please specify a database for finding scenarios")
periods_list = {}
periods_set = set()
con = sqlite3.connect(inp_f)
cur = con.cursor() # a database cursor is a control structure that enables traversal over the records in a database
con.text_factory = str # this ensures data is explored with the correct UTF-8 encoding
print(inp_f)
cur.execute("SELECT DISTINCT scenario FROM Output_VFlow_Out")
x = []
for row in cur:
x.append(row[0])
for y in x:
cur.execute("SELECT DISTINCT t_periods FROM Output_VFlow_Out WHERE scenario is '"+str(y)+"'")
periods_list[y] = []
for per in cur:
z = per[0]
periods_list[y].append(z)
cur.close()
con.close()
return dict ( OrderedDict ( sorted(periods_list.items(), key=lambda x: x[1]) ) )
def get_scenario(inp_f):
file_ty = re.search(r"(\w+)\.(\w+)\b", inp_f) # Extract the input filename and extension
if not file_ty :
raise "The file type %s is not recognized." % inp_f
elif file_ty.group(2) not in ("db", "sqlite", "sqlite3", "sqlitedb") :
raise Exception("Please specify a database for finding scenarios")
scene_list = {}
scene_set = set()
con = sqlite3.connect(inp_f)
cur = con.cursor() # a database cursor is a control structure that enables traversal over the records in a database
con.text_factory = str #this ensures data is explored with the correct UTF-8 encoding
print(inp_f)
cur.execute("SELECT DISTINCT scenario FROM Output_VFlow_Out")
for row in cur:
x = row[0]
scene_list[x] = x
cur.close()
con.close()
return dict ( OrderedDict ( sorted(scene_list.items(), key=lambda x: x[1]) ) )
def get_comm(inp_f, db_dat):
comm_list = {}
comm_set = set()
    query_has_rows = False
if not db_dat :
con = sqlite3.connect(inp_f)
cur = con.cursor() # a database cursor is a control structure that enables traversal over the records in a database
con.text_factory = str #this ensures data is explored with the correct UTF-8 encoding
print(inp_f)
cur.execute("SELECT DISTINCT comm_name FROM commodities")
for row in cur:
            query_has_rows = True
if row[0] != 'ethos':
x= row[0]
comm_list[x] = x
        if not query_has_rows:
cur.execute("SELECT input_comm FROM Output_VFlow_Out UNION SELECT output_comm FROM Output_VFlow_Out")
for row in cur:
if row[0] != 'ethos':
x= row[0]
comm_list[x] = x
cur.close()
con.close()
else:
eff_flag = False
with open (inp_f) as f :
for line in f:
if eff_flag is False and re.search("^\s*param\s+efficiency\s*[:][=]", line, flags = re.I) :
#Search for the line param Efficiency := (The script recognizes the commodities specified in this section)
eff_flag = True
elif eff_flag :
line = re.sub("[#].*$", " ", line)
if re.search("^\s*;\s*$", line) :
break # Finish searching this section when encounter a ';'
if re.search("^\s+$", line) :
continue
line = re.sub("^\s+|\s+$", "", line)
row = re.split("\s+", line)
if row[0] != 'ethos':
comm_set.add(row[0])
comm_set.add(row[3])
if eff_flag is False :
print("Error: The Efficiency Parameters cannot be found in the specified file - "+inp_f)
sys.exit(2)
for x in comm_set:
comm_list[x] = x
return OrderedDict ( sorted(comm_list.items(), key=lambda x: x[1]) )
def get_tech(inp_f, db_dat):
tech_list = {}
tech_set = set()
    query_has_rows = False
if not db_dat :
con = sqlite3.connect(inp_f)
cur = con.cursor() # a database cursor is a control structure that enables traversal over the records in a database
con.text_factory = str #this ensures data is explored with the correct UTF-8 encoding
print(inp_f)
cur.execute("SELECT DISTINCT tech FROM technologies")
for row in cur:
            query_has_rows = True
x= row[0]
tech_list[x] = x
        if not query_has_rows:
cur.execute("SELECT DISTINCT tech FROM Output_VFlow_Out")
for row in cur:
x= row[0]
tech_list[x] = x
cur.close()
con.close()
else:
eff_flag = False
with open (inp_f) as f :
for line in f:
if eff_flag is False and re.search("^\s*param\s+efficiency\s*[:][=]", line, flags = re.I) :
#Search for the line param Efficiency := (The script recognizes the commodities specified in this section)
eff_flag = True
elif eff_flag :
line = re.sub("[#].*$", " ", line)
if re.search("^\s*;\s*$", line) :
break # Finish searching this section when encounter a ';'
if re.search("^\s+$", line) :
continue
line = re.sub("^\s+|\s+$", "", line)
row = re.split("\s+", line)
tech_set.add(row[1])
if eff_flag is False :
print("Error: The Efficiency Parameters cannot be found in the specified file - "+inp_f)
sys.exit(2)
for x in tech_set:
tech_list[x] = x
return OrderedDict ( sorted(tech_list.items(), key=lambda x: x[1]) )
def is_db_overwritten(db_file, inp_dat_file):
if os.path.basename(db_file) == '0':
return False
try:
con = sqlite3.connect(db_file)
except:
return False
cur = con.cursor() # A database cursor enables traversal over DB records
con.text_factory = str # This ensures data is explored with UTF-8 encoding
# Copy tables from Input File to DB file.
# IF output file is empty database.
cur.execute("SELECT * FROM technologies")
    db_has_data = False  # stays False when the db file is empty
    for elem in cur:
        db_has_data = True  # at least one row exists in 'technologies'
        break
    # This file could be a schema populated with results from a previous run,
    # or a normal db file.
    if db_has_data:
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='input_file';")
does_input_file_table_exist = False
for i in cur: # This means that the 'input_file' table exists in db.
does_input_file_table_exist = True
if does_input_file_table_exist: # This block distinguishes normal database from schema.
# This is schema file.
cur.execute("SELECT file FROM input_file WHERE id is '1';")
for i in cur:
tagged_file = i[0]
tagged_file = re.sub('["]', "", tagged_file)
cur.close()
con.close()
if tagged_file == inp_dat_file.split(".")[0] + ".dat":
# If Input_file name matches, no overwriting.
return False
else:
# If not a match, delete output tables and update input_file. Return True
return True
cur.close()
con.close()
return False
def help_user():
print('''Use as:
python get_comm_tech.py -i (or --input) <input filename>
| -c (or --comm) To get a dict of commodities
                            | -t (or --tech) To get a dict of technologies
| -s (or --scenario) To get a dict of scenarios
| -p (or --period) To get a dict of time periods
| -h (or --help) ''')
def get_info(inputs):
inp_file = None
tech_flag = False
comm_flag = False
scene = False
db_or_dat = False # Means db by default
tperiods_flag = False
if inputs is None:
raise Exception("no arguments found")
for opt, arg in inputs.items():
print("%s == %s" %(opt, arg))
if opt in ("-i", "--input"):
inp_file = arg
elif opt in ("-c", "--comm"):
comm_flag = True
elif opt in ("-t", "--tech"):
tech_flag = True
elif opt in ("-s", "--scenario"):
scene = True
elif opt in ("-p", "--period"):
tperiods_flag = True
elif opt in ("-h", "--help"):
help_user()
sys.exit(2)
if inp_file is None:
raise Exception("Input file not specified")
if tperiods_flag:
if comm_flag or scene or tech_flag:
raise Exception("can only use one flag at a time")
if (comm_flag and tech_flag) or (comm_flag and scene) or(scene and tech_flag) or(comm_flag and tech_flag and scene):
raise Exception("can only use one flag at a time")
if not comm_flag and not tech_flag and not scene and not tperiods_flag:
raise Exception("flag not specified")
file_ty = re.search(r"(\w+)\.(\w+)\b", inp_file) # Extract the input filename and extension
if not file_ty:
raise Exception("The file type {} is not recognized.".format(file_ty))
elif file_ty.group(2) in ("db", "sqlite", "sqlite3", "sqlitedb"):
db_or_dat = False
elif file_ty.group(2) in ("dat", "txt"):
db_or_dat = True
    else:
        print("The input file type %s is not recognized. Please specify a database or a text file." % inp_file)
        sys.exit(2)
if comm_flag:
return get_comm(inp_file, db_or_dat)
if tech_flag:
return get_tech(inp_file, db_or_dat)
if tperiods_flag:
return get_tperiods(inp_file)
if scene:
if db_or_dat:
raise Exception("Please specify a database for finding scenarios")
return get_scenario(inp_file)
if __name__ == "__main__":
try:
argv = sys.argv[1:]
opts, args = getopt.getopt(argv, "hctsi:p", ["help", "comm", "tech", "scenario","input=", "period"])
print(opts)
except getopt.GetoptError:
help_user()
sys.exit(2)
print(get_info( dict(opts) ))
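# Example invocation (illustrative):
#     python get_comm_tech.py --input temoa_db.sqlite --tech
# prints an OrderedDict mapping each technology name to itself, read from the
# 'technologies' table (falling back to Output_VFlow_Out when that is empty).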
| gpl-2.0 |
wonjunetai/pulse | features/main.py | 1 | 11431 | import json
import time
from features.features_helpers import create_paths_for_cell_line
from features.uniprot_transmem import get_transmembrane_region_features
from features.uniprot_ptm import get_postranscriptional_modification_features
from features.uniprot_elm_read import get_uniprot_elm_features
from features.generate_iupred_file import generate_iupred_file
from features.uniprot_disorder import get_uniprot_disorder_features
from features.uniprot_domain_read import get_uniprot_domain_read
from features.run_pfam_scan import start_pfam_scan
from features.uniprot_core import get_sable_scores
from features.mutation_features import get_mutation_features
from features.conservation_conversion_query import create_query_file
from features.convert_from_hg19_hg18 import use_remap_api
from features.seq_conserv import generate_sequence_conservation_features
from features.event_conserv import generate_event_conservation_feature_table
from features.generate_ml_input import generate_machine_learning_matrix
from features.network_features import generate_network_features
from helpers.normalize_unicode_data import normalize_unicode_data
def feature_extract_cell_line(cell_line, pulse_path, preprocess_input_path, feature_extract_output_path):
# TODO: Could probably be better if you put every element in FEATURES_SETTINGS into a dict?
# TODO: Should move all of param_files_locations to be relative to pulse!
features_settings = json.load(open(pulse_path + '/features/features_settings.json'))
create_paths_for_cell_line(pulse_path, cell_line)
print "Features paths created for: " + cell_line
#########################
# TRANSMEMBRANE SCORING #
#########################
print "Now getting transmembrane region features..."
uniprot_exon_indices_location = preprocess_input_path + '/uniprot_exon_indices_map.out'
uniprot_tm_indices_db_location = normalize_unicode_data(features_settings["F_UNIPROT_TRANSMEM_INDICES_LOCATION"])
uniprot_tm_read_output_location = feature_extract_output_path + '/transmem_read.out'
get_transmembrane_region_features(uniprot_exon_indices_location, uniprot_tm_indices_db_location,
uniprot_tm_read_output_location)
print "Finished getting transmembrane region features."
time.sleep(2)
################
# PTM FEATURES #
################
print "Now getting post-transcriptional modifications..."
uniprot_ptm_db_location = normalize_unicode_data(features_settings["F_PTMS_LOCATION"])
uniprot_ptm_read_output_location = feature_extract_output_path + '/ptm_read.out'
get_postranscriptional_modification_features(uniprot_exon_indices_location, uniprot_ptm_db_location,
uniprot_ptm_read_output_location)
print "Finished getting post-transcriptional modification features."
###################################
# EUKARYOTIC LINEAR MOTIF SCORING #
###################################
print "Now getting eukaryotic linear motif scores..."
uniprot_elm_db_location = normalize_unicode_data(features_settings["F_ELM2_LOCATION"])
uniprot_elm_read_output_location = feature_extract_output_path + '/elm_read.out'
get_uniprot_elm_features(uniprot_exon_indices_location, uniprot_elm_db_location, uniprot_elm_read_output_location)
print "Finished getting eukaryotic linear motif scores."
#############################
# DISORDEROME HELPER SCRIPT #
#############################
print "Now running helper files for disorderome..."
p_seq_output_location = preprocess_input_path + '/p_seq_isoforms.fas'
iupred_isoforms_output_location = feature_extract_output_path + '/iupred_isoforms.out'
iupred_install_path = normalize_unicode_data(features_settings["IUPRED_INSTALL_PATH"])
generate_iupred_file(p_seq_output_location, feature_extract_output_path,
iupred_install_path, iupred_isoforms_output_location)
print "Now done running helper file for disorderome."
########################
# DISORDEROME FEATURES #
########################
print "Now getting disorderome features..."
canonical_db_location = pulse_path + '/input/info_canonical_v3.ddbb'
disorder_read_out_location = feature_extract_output_path + '/disorder_read.out'
get_uniprot_disorder_features(pulse_path, uniprot_exon_indices_location, iupred_isoforms_output_location,
canonical_db_location, disorder_read_out_location)
print "Finished getting disorderome features."
##########################
# PFAM & DOMAIN FEATURES #
##########################
print "Now running pfam_scan..."
pfam_scan_script_location = pulse_path + '/helpers/pfam_scan.pl'
pfam_input_location = preprocess_input_path + '/p_seq_isoforms.fas'
pfam_output_location = feature_extract_output_path + '/pfam_done.out'
hmmer3_data_location = normalize_unicode_data(features_settings["HMMER3_DATA_LOCATION"])
pfam_exit_code = start_pfam_scan(pfam_scan_script_location, pfam_input_location,
pfam_output_location, hmmer3_data_location)
    pfam_exit_code = 0  # NOTE: overrides the code returned above, so the failure branch below never triggers
if pfam_exit_code == 0:
print "pfam_scan successful"
print "Now getting uniprot domain features..."
f_pfam_special_db_location = normalize_unicode_data(features_settings["F_PFAM_SPECIAL_LOCATION"])
domain_read_output_location = feature_extract_output_path + '/domain_read.out'
get_uniprot_domain_read(f_pfam_special_db_location, canonical_db_location, uniprot_exon_indices_location,
pfam_output_location, domain_read_output_location)
print "Finished getting uniprot domain features."
#################
# SABLE SCORING #
#################
print "Now getting features for SABLE..."
f_sable_db_location = normalize_unicode_data(features_settings["F_SABLE_LOCATION"])
uniprot_core_output_location = feature_extract_output_path + '/core_read.out'
get_sable_scores(uniprot_exon_indices_location, f_sable_db_location, uniprot_core_output_location)
print "Finished getting features for SABLE."
####################
# MUTATION SCORING #
####################
print "Now getting mutation scores..."
f_mutations_db_location = normalize_unicode_data(features_settings["F_MUTATIONS_LOCATION"])
mutation_features_output_location = feature_extract_output_path + '/mutation_read.out'
get_mutation_features(uniprot_exon_indices_location, f_mutations_db_location,
mutation_features_output_location)
print "Finished getting mutation scores."
#################################
# CONSERVATION/NETWORK FEATURES #
#################################
print "Creating query file conservation/network features..."
conservation_query_output_location = feature_extract_output_path + '/conservation_query.txt'
as_location_file = preprocess_input_path + '/as_location.out'
create_query_file(as_location_file, conservation_query_output_location)
print "Finished creating query file."
print "Converting between hg19 to hg18 using the query file generated..."
remap_api_location = pulse_path + '/helpers/remap_api.pl'
remap_mode = "asm-asm"
remap_from = "GCF_000001405.13"
remap_to = "GCF_000001405.12"
remap_input_location = conservation_query_output_location
remap_output_location = feature_extract_output_path + '/report_conservationQuery.txt.xls'
remap_exit_code = use_remap_api(remap_api_location, remap_mode, remap_from, remap_to,
remap_input_location, remap_output_location)
print "Conversion complete."
    remap_exit_code = 0  # NOTE: overrides the code returned above, so the failure branch below never triggers
if remap_exit_code == 0:
print "Now generating sequence conservation features..."
f_phastcons_db_location = normalize_unicode_data(features_settings["F_PHASTCONS_HG18_BED_LOCATION"])
as_location_file = preprocess_input_path + '/as_location.out'
remapped_coordinates_file = feature_extract_output_path + '/report_conservationQuery.txt'
sequence_conservation_output_location = feature_extract_output_path + '/sequenceCon_read.out'
generate_sequence_conservation_features(f_phastcons_db_location, as_location_file,
remapped_coordinates_file, sequence_conservation_output_location)
print "Finished generating sequence conservation features."
print "Now generating event_conservation feature table..."
f_event_conservation_db_location = normalize_unicode_data(features_settings["F_EVENT_CONSERVATION_LOCATION"])
event_conservation_output = feature_extract_output_path + '/eventCon_read.out'
generate_event_conservation_feature_table(f_phastcons_db_location, f_event_conservation_db_location,
as_location_file, remapped_coordinates_file,
event_conservation_output)
print "Finished generating event_conservation feature table."
print "Now generating network features using gene names..."
f_uniprot_genewiki_location = normalize_unicode_data(features_settings["F_UNIPROT_GENEWIKI_LOCATION"])
f_degree_location = normalize_unicode_data(features_settings["F_DEGREE_LOCATION"])
network_features_output_location = feature_extract_output_path + '/degree_read.out'
generate_network_features(f_uniprot_genewiki_location, f_degree_location, uniprot_exon_indices_location,
network_features_output_location)
print "Finished generating network features."
###############################
# GENERATE MATRIX OF FEATURES #
###############################
print "Now generating ML input..."
machine_learning_output_path = pulse_path + '/output/machine/' + cell_line
generated_ml_output = machine_learning_output_path + '/features_headers.txt'
generated_ml_names = machine_learning_output_path + '/names.txt'
ts_read = normalize_unicode_data(features_settings["ML_AS_EVENT_CLASSIFICATION"])
generate_machine_learning_matrix(uniprot_exon_indices_location, uniprot_core_output_location,
network_features_output_location,
disorder_read_out_location, domain_read_output_location,
uniprot_elm_read_output_location, event_conservation_output,
mutation_features_output_location, uniprot_ptm_read_output_location,
sequence_conservation_output_location, uniprot_tm_read_output_location,
ts_read, generated_ml_output, generated_ml_names)
print "Finished generating ML input."
else:
print "Remapping failed"
exit()
else:
print "pfam_scan failed"
exit()
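# Hedged usage sketch (added; every path below is an assumption, not taken
# from this repo):
#
#     feature_extract_cell_line(
#         cell_line='HEK293',
#         pulse_path='/opt/pulse',
#         preprocess_input_path='/opt/pulse/output/preprocess/HEK293',
#         feature_extract_output_path='/opt/pulse/output/features/HEK293',
#     )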
| mit |
glaubitz/fs-uae-debian | arcade/OpenGL/GL/VERSION/GL_1_2_images.py | 9 | 1587 | """Version 1.2 Image-handling functions
Almost all of the 1.2 enhancements are image-handling-related,
so this is most of the 1.2 wrapper code...
Note that the functions that manually wrap certain operations are
guarded by if simple.functionName checks, so that you can use
if functionName to see if the function is available at run-time.
"""
from OpenGL import wrapper, constants, arrays
from OpenGL.raw.GL.ARB import imaging
from OpenGL.raw.GL.VERSION import GL_1_2 as _simple
from OpenGL.GL.ARB.imaging import *
from OpenGL.GL import images
import ctypes
for suffix,arrayConstant in [
('b', constants.GL_BYTE),
('f', constants.GL_FLOAT),
('i', constants.GL_INT),
('s', constants.GL_SHORT),
('ub', constants.GL_UNSIGNED_BYTE),
('ui', constants.GL_UNSIGNED_INT),
('us', constants.GL_UNSIGNED_SHORT),
]:
for functionName in (
'glTexImage3D',
'glTexSubImage3D', # extension/1.2 standard
):
functionName, function = images.typedImageFunction(
suffix, arrayConstant, getattr(_simple, functionName),
)
globals()[functionName] = function
try:
del function, functionName
except NameError as err:
pass
try:
del suffix,arrayConstant
except NameError as err:
pass
glTexImage3D = images.setDimensionsAsInts(
images.setImageInput(
_simple.glTexImage3D,
typeName = 'type',
)
)
glTexSubImage3D = images.setDimensionsAsInts(
images.setImageInput(
_simple.glTexSubImage3D,
typeName = 'type',
)
)
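# Added note: images.typedImageFunction generates per-type aliases such as
# glTexImage3Df / glTexImage3Dub whose pixel `type` argument is bound to the
# matching GL constant, so callers supply the array without repeating the
# type enum.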
| gpl-2.0 |
jaimahajan1997/sympy | sympy/printing/jscode.py | 17 | 10902 | """
Javascript code printer
The JavascriptCodePrinter converts single sympy expressions into single
Javascript expressions, using the functions defined in the Javascript
Math object where possible.
"""
from __future__ import print_function, division
from sympy.core import S
from sympy.codegen.ast import Assignment
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
from sympy.core.compatibility import string_types, range
# dictionary mapping sympy function to (argument_conditions, Javascript_function).
# Used in JavascriptCodePrinter._print_Function(self)
known_functions = {
'Abs': 'Math.abs',
'sin': 'Math.sin',
'cos': 'Math.cos',
'tan': 'Math.tan',
'acos': 'Math.acos',
'asin': 'Math.asin',
'atan': 'Math.atan',
'atan2': 'Math.atan2',
'ceiling': 'Math.ceil',
'floor': 'Math.floor',
'sign': 'Math.sign',
'exp': 'Math.exp',
'log': 'Math.log',
}
class JavascriptCodePrinter(CodePrinter):
""""A Printer to convert python expressions to strings of javascript code
"""
printmethod = '_javascript'
language = 'Javascript'
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 15,
'user_functions': {},
'human': True,
'contract': True
}
def __init__(self, settings={}):
CodePrinter.__init__(self, settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "// {0}".format(text)
def _declare_number_const(self, name, value):
return "var {0} = {1};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for i in range(rows) for j in range(cols))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
loopstart = "for (var %(varble)s=%(start)s; %(varble)s<%(end)s; %(varble)s++){"
for i in indices:
# Javascript arrays start at 0 and end at dimension-1
open_lines.append(loopstart % {
'varble': self._print(i.label),
'start': self._print(i.lower),
'end': self._print(i.upper + 1)})
close_lines.append("}")
return open_lines, close_lines
def _print_Pow(self, expr):
PREC = precedence(expr)
if expr.exp == -1:
return '1/%s' % (self.parenthesize(expr.base, PREC))
elif expr.exp == 0.5:
return 'Math.sqrt(%s)' % self._print(expr.base)
else:
return 'Math.pow(%s, %s)' % (self._print(expr.base),
self._print(expr.exp))
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return '%d/%d' % (p, q)
def _print_Indexed(self, expr):
# calculate index for 1d array
dims = expr.shape
elem = S.Zero
offset = S.One
for i in reversed(range(expr.rank)):
elem += expr.indices[i]*offset
offset *= dims[i]
return "%s[%s]" % (self._print(expr.base.label), self._print(elem))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Exp1(self, expr):
return "Math.E"
def _print_Pi(self, expr):
return 'Math.PI'
def _print_Infinity(self, expr):
return 'Number.POSITIVE_INFINITY'
def _print_NegativeInfinity(self, expr):
return 'Number.NEGATIVE_INFINITY'
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) {" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else {")
else:
lines.append("else if (%s) {" % self._print(c))
code0 = self._print(e)
lines.append(code0)
lines.append("}")
return "\n".join(lines)
else:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
ecpairs = ["((%s) ? (\n%s\n)\n" % (self._print(c), self._print(e))
for e, c in expr.args[:-1]]
last_line = ": (\n%s\n)" % self._print(expr.args[-1].expr)
return ": ".join(ecpairs) + last_line + " ".join([")"*len(ecpairs)])
def _print_MatrixElement(self, expr):
return "{0}[{1}]".format(expr.parent, expr.j +
expr.i*expr.parent.shape[1])
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
if isinstance(code, string_types):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_token = ('{', '(', '{\n', '(\n')
dec_token = ('}', ')')
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any(map(line.endswith, inc_token))) for line in code ]
decrease = [ int(any(map(line.startswith, dec_token)))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line == '' or line == '\n':
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
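    # Added note: indent_code tracks nesting with a single pass, counting
    # lines that end in an opening token and lines that start with a closing
    # token after leading whitespace has been stripped.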
def jscode(expr, assign_to=None, **settings):
"""Converts an expr to a string of javascript code
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This is helpful in case of
line-wrapping, or for expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=15].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, js_function_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
Examples
========
>>> from sympy import jscode, symbols, Rational, sin, ceiling, Abs
>>> x, tau = symbols("x, tau")
>>> jscode((2*tau)**Rational(7, 2))
'8*Math.sqrt(2)*Math.pow(tau, 7/2)'
>>> jscode(sin(x), assign_to="s")
's = Math.sin(x);'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e. [(argument_test,
js_function_string)].
>>> custom_functions = {
... "ceiling": "CEIL",
... "Abs": [(lambda x: not x.is_integer, "fabs"),
... (lambda x: x.is_integer, "ABS")]
... }
>>> jscode(Abs(x) + ceiling(x), user_functions=custom_functions)
'fabs(x) + CEIL(x)'
``Piecewise`` expressions are converted into conditionals. If an
``assign_to`` variable is provided an if statement is created, otherwise
the ternary operator is used. Note that if the ``Piecewise`` lacks a
default term, represented by ``(expr, True)`` then an error will be thrown.
This is to prevent generating an expression that may not evaluate to
anything.
>>> from sympy import Piecewise
>>> expr = Piecewise((x + 1, x > 0), (x, True))
>>> print(jscode(expr, tau))
if (x > 0) {
tau = x + 1;
}
else {
tau = x;
}
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e=Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> jscode(e.rhs, assign_to=e.lhs, contract=False)
'Dy[i] = (y[i + 1] - y[i])/(t[i + 1] - t[i]);'
Matrices are also supported, but a ``MatrixSymbol`` of the same dimensions
must be provided to ``assign_to``. Note that any expression that can be
generated normally can also exist inside a Matrix:
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([x**2, Piecewise((x + 1, x > 0), (x, True)), sin(x)])
>>> A = MatrixSymbol('A', 3, 1)
>>> print(jscode(mat, A))
A[0] = Math.pow(x, 2);
if (x > 0) {
A[1] = x + 1;
}
else {
A[1] = x;
}
A[2] = Math.sin(x);
"""
return JavascriptCodePrinter(settings).doprint(expr, assign_to)
def print_jscode(expr, **settings):
"""Prints the Javascript representation of the given expression.
See jscode for the meaning of the optional arguments.
"""
print(jscode(expr, **settings))
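
# Usage sketch for the helpers above (assumes sympy is importable; the exact
# term ordering of the output is illustrative):
#
#     from sympy import symbols, sin
#     x = symbols('x')
#     print_jscode(sin(x) + x**2)   # e.g. Math.pow(x, 2) + Math.sin(x)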
| bsd-3-clause |
divio/django | tests/template_tests/syntax_tests/test_extends.py | 86 | 15503 | from django.test import SimpleTestCase
from ..utils import setup
inheritance_templates = {
'inheritance01': "1{% block first %}&{% endblock %}3{% block second %}_{% endblock %}",
'inheritance02': "{% extends 'inheritance01' %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance03': "{% extends 'inheritance02' %}",
'inheritance04': "{% extends 'inheritance01' %}",
'inheritance05': "{% extends 'inheritance02' %}",
'inheritance06': "{% extends foo %}",
'inheritance07': "{% extends 'inheritance01' %}{% block second %}5{% endblock %}",
'inheritance08': "{% extends 'inheritance02' %}{% block second %}5{% endblock %}",
'inheritance09': "{% extends 'inheritance04' %}",
'inheritance10': "{% extends 'inheritance04' %} ",
'inheritance11': "{% extends 'inheritance04' %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance12': "{% extends 'inheritance07' %}{% block first %}2{% endblock %}",
'inheritance13': "{% extends 'inheritance02' %}"
"{% block first %}a{% endblock %}{% block second %}b{% endblock %}",
'inheritance14': "{% extends 'inheritance01' %}{% block newblock %}NO DISPLAY{% endblock %}",
'inheritance15': "{% extends 'inheritance01' %}"
"{% block first %}2{% block inner %}inner{% endblock %}{% endblock %}",
'inheritance16': "{% extends 'inheritance15' %}{% block inner %}out{% endblock %}",
'inheritance17': "{% load testtags %}{% block first %}1234{% endblock %}",
'inheritance18': "{% load testtags %}{% echo this that theother %}5678",
'inheritance19': "{% extends 'inheritance01' %}"
"{% block first %}{% load testtags %}{% echo 400 %}5678{% endblock %}",
'inheritance20': "{% extends 'inheritance01' %}{% block first %}{{ block.super }}a{% endblock %}",
'inheritance21': "{% extends 'inheritance02' %}{% block first %}{{ block.super }}a{% endblock %}",
'inheritance22': "{% extends 'inheritance04' %}{% block first %}{{ block.super }}a{% endblock %}",
'inheritance23': "{% extends 'inheritance20' %}{% block first %}{{ block.super }}b{% endblock %}",
'inheritance24': "{% extends context_template %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance25': "{% extends context_template.1 %}"
"{% block first %}2{% endblock %}{% block second %}4{% endblock %}",
'inheritance26': "no tags",
'inheritance27': "{% extends 'inheritance26' %}",
'inheritance 28': "{% block first %}!{% endblock %}",
'inheritance29': "{% extends 'inheritance 28' %}",
'inheritance30': "1{% if optional %}{% block opt %}2{% endblock %}{% endif %}3",
'inheritance31': "{% extends 'inheritance30' %}{% block opt %}two{% endblock %}",
'inheritance32': "{% extends 'inheritance30' %}{% block opt %}two{% endblock %}",
'inheritance33': "1{% ifequal optional 1 %}{% block opt %}2{% endblock %}{% endifequal %}3",
'inheritance34': "{% extends 'inheritance33' %}{% block opt %}two{% endblock %}",
'inheritance35': "{% extends 'inheritance33' %}{% block opt %}two{% endblock %}",
'inheritance36': "{% for n in numbers %}_{% block opt %}{{ n }}{% endblock %}{% endfor %}_",
'inheritance37': "{% extends 'inheritance36' %}{% block opt %}X{% endblock %}",
'inheritance38': "{% extends 'inheritance36' %}{% block opt %}X{% endblock %}",
'inheritance39': "{% extends 'inheritance30' %}{% block opt %}new{{ block.super }}{% endblock %}",
'inheritance40': "{% extends 'inheritance33' %}{% block opt %}new{{ block.super }}{% endblock %}",
'inheritance41': "{% extends 'inheritance36' %}{% block opt %}new{{ block.super }}{% endblock %}",
'inheritance42': "{% extends 'inheritance02'|cut:' ' %}",
}
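
# Worked example (informal reading of the fixtures above): rendering
# 'inheritance02' pulls in 'inheritance01' via {% extends %}, and its two
# {% block %} overrides replace '&' with '2' and '_' with '4', yielding
# '1234' -- the value asserted in test_inheritance02 below.
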
class InheritanceTests(SimpleTestCase):
libraries = {'testtags': 'template_tests.templatetags.testtags'}
@setup(inheritance_templates)
def test_inheritance01(self):
"""
Standard template with no inheritance
"""
output = self.engine.render_to_string('inheritance01')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance02(self):
"""
Standard two-level inheritance
"""
output = self.engine.render_to_string('inheritance02')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance03(self):
"""
Three-level with no redefinitions on third level
"""
output = self.engine.render_to_string('inheritance03')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance04(self):
"""
Two-level with no redefinitions on second level
"""
output = self.engine.render_to_string('inheritance04')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance05(self):
"""
Two-level with double quotes instead of single quotes
"""
output = self.engine.render_to_string('inheritance05')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance06(self):
"""
Three-level with variable parent-template name
"""
output = self.engine.render_to_string('inheritance06', {'foo': 'inheritance02'})
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance07(self):
"""
Two-level with one block defined, one block not defined
"""
output = self.engine.render_to_string('inheritance07')
self.assertEqual(output, '1&35')
@setup(inheritance_templates)
def test_inheritance08(self):
"""
Three-level with one block defined on this level, two blocks
defined next level
"""
output = self.engine.render_to_string('inheritance08')
self.assertEqual(output, '1235')
@setup(inheritance_templates)
def test_inheritance09(self):
"""
Three-level with second and third levels blank
"""
output = self.engine.render_to_string('inheritance09')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance10(self):
"""
Three-level with space NOT in a block -- should be ignored
"""
output = self.engine.render_to_string('inheritance10')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance11(self):
"""
Three-level with both blocks defined on this level, but none on
second level
"""
output = self.engine.render_to_string('inheritance11')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance12(self):
"""
Three-level with this level providing one and second level
providing the other
"""
output = self.engine.render_to_string('inheritance12')
self.assertEqual(output, '1235')
@setup(inheritance_templates)
def test_inheritance13(self):
"""
Three-level with this level overriding second level
"""
output = self.engine.render_to_string('inheritance13')
self.assertEqual(output, '1a3b')
@setup(inheritance_templates)
def test_inheritance14(self):
"""
A block defined only in a child template shouldn't be displayed
"""
output = self.engine.render_to_string('inheritance14')
self.assertEqual(output, '1&3_')
@setup(inheritance_templates)
def test_inheritance15(self):
"""
A block within another block
"""
output = self.engine.render_to_string('inheritance15')
self.assertEqual(output, '12inner3_')
@setup(inheritance_templates)
def test_inheritance16(self):
"""
A block within another block (level 2)
"""
output = self.engine.render_to_string('inheritance16')
self.assertEqual(output, '12out3_')
@setup(inheritance_templates)
def test_inheritance17(self):
"""
{% load %} tag (parent -- setup for exception04)
"""
output = self.engine.render_to_string('inheritance17')
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance18(self):
"""
{% load %} tag (standard usage, without inheritance)
"""
output = self.engine.render_to_string('inheritance18')
self.assertEqual(output, 'this that theother5678')
@setup(inheritance_templates)
def test_inheritance19(self):
"""
{% load %} tag (within a child template)
"""
output = self.engine.render_to_string('inheritance19')
self.assertEqual(output, '140056783_')
@setup(inheritance_templates)
def test_inheritance20(self):
"""
Two-level inheritance with {{ block.super }}
"""
output = self.engine.render_to_string('inheritance20')
self.assertEqual(output, '1&a3_')
@setup(inheritance_templates)
def test_inheritance21(self):
"""
Three-level inheritance with {{ block.super }} from parent
"""
output = self.engine.render_to_string('inheritance21')
self.assertEqual(output, '12a34')
@setup(inheritance_templates)
def test_inheritance22(self):
"""
Three-level inheritance with {{ block.super }} from grandparent
"""
output = self.engine.render_to_string('inheritance22')
self.assertEqual(output, '1&a3_')
@setup(inheritance_templates)
def test_inheritance23(self):
"""
Three-level inheritance with {{ block.super }} from parent and
grandparent
"""
output = self.engine.render_to_string('inheritance23')
self.assertEqual(output, '1&ab3_')
@setup(inheritance_templates)
def test_inheritance24(self):
"""
Inheritance from local context without use of template loader
"""
context_template = self.engine.from_string("1{% block first %}_{% endblock %}3{% block second %}_{% endblock %}")
output = self.engine.render_to_string('inheritance24', {'context_template': context_template})
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance25(self):
"""
Inheritance from local context with variable parent template
"""
context_template = [
self.engine.from_string("Wrong"),
self.engine.from_string("1{% block first %}_{% endblock %}3{% block second %}_{% endblock %}"),
]
output = self.engine.render_to_string('inheritance25', {'context_template': context_template})
self.assertEqual(output, '1234')
@setup(inheritance_templates)
def test_inheritance26(self):
"""
Set up a base template to extend
"""
output = self.engine.render_to_string('inheritance26')
self.assertEqual(output, 'no tags')
@setup(inheritance_templates)
def test_inheritance27(self):
"""
Inheritance from a template that doesn't have any blocks
"""
output = self.engine.render_to_string('inheritance27')
self.assertEqual(output, 'no tags')
@setup(inheritance_templates)
def test_inheritance_28(self):
"""
Set up a base template with a space in it.
"""
output = self.engine.render_to_string('inheritance 28')
self.assertEqual(output, '!')
@setup(inheritance_templates)
def test_inheritance29(self):
"""
Inheritance from a template with a space in its name should work.
"""
output = self.engine.render_to_string('inheritance29')
self.assertEqual(output, '!')
@setup(inheritance_templates)
def test_inheritance30(self):
"""
Base template, putting block in a conditional {% if %} tag
"""
output = self.engine.render_to_string('inheritance30', {'optional': True})
self.assertEqual(output, '123')
# Inherit from a template with block wrapped in an {% if %} tag
# (in parent), still gets overridden
@setup(inheritance_templates)
def test_inheritance31(self):
output = self.engine.render_to_string('inheritance31', {'optional': True})
self.assertEqual(output, '1two3')
@setup(inheritance_templates)
def test_inheritance32(self):
output = self.engine.render_to_string('inheritance32')
self.assertEqual(output, '13')
@setup(inheritance_templates)
def test_inheritance33(self):
"""
Base template, putting block in a conditional {% ifequal %} tag
"""
output = self.engine.render_to_string('inheritance33', {'optional': 1})
self.assertEqual(output, '123')
@setup(inheritance_templates)
def test_inheritance34(self):
"""
Inherit from a template with block wrapped in an {% ifequal %} tag
(in parent), still gets overridden
"""
output = self.engine.render_to_string('inheritance34', {'optional': 1})
self.assertEqual(output, '1two3')
@setup(inheritance_templates)
def test_inheritance35(self):
"""
Inherit from a template with block wrapped in an {% ifequal %} tag
(in parent), still gets overridden
"""
output = self.engine.render_to_string('inheritance35', {'optional': 2})
self.assertEqual(output, '13')
@setup(inheritance_templates)
def test_inheritance36(self):
"""
Base template, putting block in a {% for %} tag
"""
output = self.engine.render_to_string('inheritance36', {'numbers': '123'})
self.assertEqual(output, '_1_2_3_')
@setup(inheritance_templates)
def test_inheritance37(self):
"""
        Inherit from a template with block wrapped in a {% for %} tag
(in parent), still gets overridden
"""
output = self.engine.render_to_string('inheritance37', {'numbers': '123'})
self.assertEqual(output, '_X_X_X_')
@setup(inheritance_templates)
def test_inheritance38(self):
"""
        Inherit from a template with block wrapped in a {% for %} tag
(in parent), still gets overridden
"""
output = self.engine.render_to_string('inheritance38')
self.assertEqual(output, '_')
# The super block will still be found.
@setup(inheritance_templates)
def test_inheritance39(self):
output = self.engine.render_to_string('inheritance39', {'optional': True})
self.assertEqual(output, '1new23')
@setup(inheritance_templates)
def test_inheritance40(self):
output = self.engine.render_to_string('inheritance40', {'optional': 1})
self.assertEqual(output, '1new23')
@setup(inheritance_templates)
def test_inheritance41(self):
output = self.engine.render_to_string('inheritance41', {'numbers': '123'})
self.assertEqual(output, '_new1_new2_new3_')
@setup(inheritance_templates)
def test_inheritance42(self):
"""
Expression starting and ending with a quote
"""
output = self.engine.render_to_string('inheritance42')
self.assertEqual(output, '1234')
| bsd-3-clause |