repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null ⌀)
---|---|---|---|---|
tashaxe/Red-DiscordBot | refs/heads/develop | lib/requests/__init__.py | 71 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2016 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.13.0'
__build__ = 0x021300
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
import warnings
# urllib3's DependencyWarnings should be silenced.
from .packages.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
|
dnxbjyj/python-basic | refs/heads/master | useful-func/map/test_map.py | 1 | # coding:utf-8
# Test the usage of the map() function
'''
Docstring:
map(function, sequence[, sequence, ...]) -> list
Return a list of the results of applying the function to the items of
the argument sequence(s). If more than one sequence is given, the
function is called with an argument list consisting of the corresponding
item of each sequence, substituting None for missing values when not all
sequences have the same length. If the function is None, return a list of
the items of the sequence (or a list of tuples if more than one sequence).
As the docstring above shows, the first argument of map() is a function object, which may be followed by one or more sequences; the return value is a list.
If the function is not None, the i-th element of the result list is obtained by applying the function to the i-th element of each sequence. If the sequences are not all the same length, None is substituted for the missing values (so the call may fail unless the function handles None).
If the function is None, the i-th element of the result list is an n-tuple of the i-th elements of the sequences (n being the number of sequences); if the sequences are not all the same length, the missing positions are filled with None.
'''
def main():
a = [1,2,3,4]
b = [3,5,9]
c = [8,2,3]
print map(None,a,b,c)
# Output: [(1, 3, 8), (2, 5, 2), (3, 9, 3), (4, None, None)]
print map(lambda x : x ** 2,a)
# Output: [1, 4, 9, 16]
# print map(lambda x,y : x + y,a)
# Output: TypeError: <lambda>() takes exactly 2 arguments (1 given)
print map(lambda x,y : x + y,b,c)
# Output: [11, 7, 12]
# print map(lambda x,y,z : x + y + z,a,b,c)
# Output: TypeError: unsupported operand type(s) for +: 'int' and 'NoneType'
print map(lambda x,y : x + y if x is not None and y is not None else None,a,b)
# Output: [4, 7, 12, None]
if __name__ == '__main__':
main() |
kamyu104/django | refs/heads/master | tests/inline_formsets/tests.py | 326 | from __future__ import unicode_literals
from django.forms.models import inlineformset_factory
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import Child, Parent, Poem, Poet, School
class DeletionTests(TestCase):
def test_deletion(self):
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True, fields="__all__")
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': str(poem.pk),
'poem_set-0-poet': str(poet.pk),
'poem_set-0-name': 'test',
'poem_set-0-DELETE': 'on',
}
formset = PoemFormSet(data, instance=poet)
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poem.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True, fields="__all__")
poet = Poet.objects.create(name='test')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '0',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': '',
'poem_set-0-poem': '1',
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 0)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that a change form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True, fields="__all__")
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': six.text_type(poem.id),
'poem_set-0-poem': six.text_type(poem.id),
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_save_new(self):
"""
Make sure inlineformsets respect commit=False
regression for #10750
"""
# exclude some required field from the forms
ChildFormSet = inlineformset_factory(School, Child, exclude=['father', 'mother'])
school = School.objects.create(name='test')
mother = Parent.objects.create(name='mother')
father = Parent.objects.create(name='father')
data = {
'child_set-TOTAL_FORMS': '1',
'child_set-INITIAL_FORMS': '0',
'child_set-MAX_NUM_FORMS': '0',
'child_set-0-name': 'child',
}
formset = ChildFormSet(data, instance=school)
self.assertEqual(formset.is_valid(), True)
objects = formset.save(commit=False)
for obj in objects:
obj.mother = mother
obj.father = father
obj.save()
self.assertEqual(school.child_set.count(), 1)
class InlineFormsetFactoryTest(TestCase):
def test_inline_formset_factory(self):
"""
These should both work without a problem.
"""
inlineformset_factory(Parent, Child, fk_name='mother', fields="__all__")
inlineformset_factory(Parent, Child, fk_name='father', fields="__all__")
def test_exception_on_unspecified_foreign_key(self):
"""
Child has two ForeignKeys to Parent, so if we don't specify which one
to use for the inline formset, we should get an exception.
"""
six.assertRaisesRegex(
self,
ValueError,
"'inline_formsets.Child' has more than one ForeignKey to 'inline_formsets.Parent'.",
inlineformset_factory, Parent, Child
)
def test_fk_name_not_foreign_key_field_from_child(self):
"""
If we specify fk_name, but it isn't a ForeignKey from the child model
to the parent model, we should get an exception.
"""
self.assertRaises(
Exception,
"fk_name 'school' is not a ForeignKey to <class 'inline_formsets.models.Parent'>",
inlineformset_factory, Parent, Child, fk_name='school'
)
def test_non_foreign_key_field(self):
"""
If the field specified in fk_name is not a ForeignKey, we should get an
exception.
"""
six.assertRaisesRegex(
self, ValueError,
"'inline_formsets.Child' has no field named 'test'.",
inlineformset_factory, Parent, Child, fk_name='test'
)
def test_any_iterable_allowed_as_argument_to_exclude(self):
# Regression test for #9171.
inlineformset_factory(
Parent, Child, exclude=['school'], fk_name='mother'
)
inlineformset_factory(
Parent, Child, exclude=('school',), fk_name='mother'
)
@skipUnlessDBFeature('allows_auto_pk_0')
def test_zero_primary_key(self):
# Regression test for #21472
poet = Poet.objects.create(id=0, name='test')
poet.poem_set.create(name='test poem')
PoemFormSet = inlineformset_factory(Poet, Poem, fields="__all__", extra=0)
formset = PoemFormSet(None, instance=poet)
self.assertEqual(len(formset.forms), 1)
|
dash-dash/pyzmq | refs/heads/master | zmq/auth/base.py | 7 | """Base implementation of 0MQ authentication."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import logging
import zmq
from zmq.utils import z85
from zmq.utils.strtypes import bytes, unicode, b, u
from zmq.error import _check_version
from .certs import load_certificates
CURVE_ALLOW_ANY = '*'
VERSION = b'1.0'
class Authenticator(object):
"""Implementation of ZAP authentication for zmq connections.
Note:
- libzmq provides four levels of security: default NULL (which the Authenticator does
not see), and authenticated NULL, PLAIN, and CURVE, which the Authenticator can see.
- until you add policies, all incoming NULL connections are allowed
(classic ZeroMQ behavior), and all PLAIN and CURVE connections are denied.
"""
def __init__(self, context=None, encoding='utf-8', log=None):
_check_version((4,0), "security")
self.context = context or zmq.Context.instance()
self.encoding = encoding
self.allow_any = False
self.zap_socket = None
self.whitelist = set()
self.blacklist = set()
# passwords is a dict keyed by domain and contains values
# of dicts with username:password pairs.
self.passwords = {}
# certs is dict keyed by domain and contains values
# of dicts keyed by the public keys from the specified location.
self.certs = {}
self.log = log or logging.getLogger('zmq.auth')
def start(self):
"""Create and bind the ZAP socket"""
self.zap_socket = self.context.socket(zmq.REP)
self.zap_socket.linger = 1
self.zap_socket.bind("inproc://zeromq.zap.01")
def stop(self):
"""Close the ZAP socket"""
if self.zap_socket:
self.zap_socket.close()
self.zap_socket = None
def allow(self, *addresses):
"""Allow (whitelist) IP address(es).
Connections from addresses not in the whitelist will be rejected.
- For NULL, all clients from this address will be accepted.
- For PLAIN and CURVE, they will be allowed to continue with authentication.
whitelist is mutually exclusive with blacklist.
"""
if self.blacklist:
raise ValueError("Only use a whitelist or a blacklist, not both")
self.whitelist.update(addresses)
def deny(self, *addresses):
"""Deny (blacklist) IP address(es).
Addresses not in the blacklist will be allowed to continue with authentication.
Blacklist is mutually exclusive with whitelist.
"""
if self.whitelist:
raise ValueError("Only use a whitelist or a blacklist, not both")
self.blacklist.update(addresses)
def configure_plain(self, domain='*', passwords=None):
"""Configure PLAIN authentication for a given domain.
PLAIN authentication uses a plain-text password file.
To cover all domains, use "*".
You can modify the password file at any time; it is reloaded automatically.
"""
if passwords:
self.passwords[domain] = passwords
def configure_curve(self, domain='*', location=None):
"""Configure CURVE authentication for a given domain.
CURVE authentication uses a directory that holds all public client certificates,
i.e. their public keys.
To cover all domains, use "*".
You can add and remove certificates in that directory at any time.
To allow all client keys without checking, specify CURVE_ALLOW_ANY for the location.
"""
# If location is CURVE_ALLOW_ANY then allow all clients. Otherwise
# treat location as a directory that holds the certificates.
if location == CURVE_ALLOW_ANY:
self.allow_any = True
else:
self.allow_any = False
try:
self.certs[domain] = load_certificates(location)
except Exception as e:
self.log.error("Failed to load CURVE certs from %s: %s", location, e)
def handle_zap_message(self, msg):
"""Perform ZAP authentication"""
if len(msg) < 6:
self.log.error("Invalid ZAP message, not enough frames: %r", msg)
if len(msg) < 2:
self.log.error("Not enough information to reply")
else:
self._send_zap_reply(msg[1], b"400", b"Not enough frames")
return
version, request_id, domain, address, identity, mechanism = msg[:6]
credentials = msg[6:]
domain = u(domain, self.encoding, 'replace')
address = u(address, self.encoding, 'replace')
if (version != VERSION):
self.log.error("Invalid ZAP version: %r", msg)
self._send_zap_reply(request_id, b"400", b"Invalid version")
return
self.log.debug("version: %r, request_id: %r, domain: %r,"
" address: %r, identity: %r, mechanism: %r",
version, request_id, domain,
address, identity, mechanism,
)
# Is the address explicitly whitelisted or blacklisted?
allowed = False
denied = False
reason = b"NO ACCESS"
if self.whitelist:
if address in self.whitelist:
allowed = True
self.log.debug("PASSED (whitelist) address=%s", address)
else:
denied = True
reason = b"Address not in whitelist"
self.log.debug("DENIED (not in whitelist) address=%s", address)
elif self.blacklist:
if address in self.blacklist:
denied = True
reason = b"Address is blacklisted"
self.log.debug("DENIED (blacklist) address=%s", address)
else:
allowed = True
self.log.debug("PASSED (not in blacklist) address=%s", address)
# Perform authentication mechanism-specific checks if necessary
username = u("user")
if not denied:
if mechanism == b'NULL' and not allowed:
# For NULL, we allow if the address wasn't blacklisted
self.log.debug("ALLOWED (NULL)")
allowed = True
elif mechanism == b'PLAIN':
# For PLAIN, even a whitelisted address must authenticate
if len(credentials) != 2:
self.log.error("Invalid PLAIN credentials: %r", credentials)
self._send_zap_reply(request_id, b"400", b"Invalid credentials")
return
username, password = [ u(c, self.encoding, 'replace') for c in credentials ]
allowed, reason = self._authenticate_plain(domain, username, password)
elif mechanism == b'CURVE':
# For CURVE, even a whitelisted address must authenticate
if len(credentials) != 1:
self.log.error("Invalid CURVE credentials: %r", credentials)
self._send_zap_reply(request_id, b"400", b"Invalid credentials")
return
key = credentials[0]
allowed, reason = self._authenticate_curve(domain, key)
if allowed:
self._send_zap_reply(request_id, b"200", b"OK", username)
else:
self._send_zap_reply(request_id, b"400", reason)
def _authenticate_plain(self, domain, username, password):
"""PLAIN ZAP authentication"""
allowed = False
reason = b""
if self.passwords:
# If no domain is specified then use the default domain
if not domain:
domain = '*'
if domain in self.passwords:
if username in self.passwords[domain]:
if password == self.passwords[domain][username]:
allowed = True
else:
reason = b"Invalid password"
else:
reason = b"Invalid username"
else:
reason = b"Invalid domain"
if allowed:
self.log.debug("ALLOWED (PLAIN) domain=%s username=%s password=%s",
domain, username, password,
)
else:
self.log.debug("DENIED %s", reason)
else:
reason = b"No passwords defined"
self.log.debug("DENIED (PLAIN) %s", reason)
return allowed, reason
def _authenticate_curve(self, domain, client_key):
"""CURVE ZAP authentication"""
allowed = False
reason = b""
if self.allow_any:
allowed = True
reason = b"OK"
self.log.debug("ALLOWED (CURVE allow any client)")
else:
# If no explicit domain is specified then use the default domain
if not domain:
domain = '*'
if domain in self.certs:
# The certs dict stores keys in z85 format, convert binary key to z85 bytes
z85_client_key = z85.encode(client_key)
if self.certs[domain].get(z85_client_key):
allowed = True
reason = b"OK"
else:
reason = b"Unknown key"
status = "ALLOWED" if allowed else "DENIED"
self.log.debug("%s (CURVE) domain=%s client_key=%s",
status, domain, z85_client_key,
)
else:
reason = b"Unknown domain"
return allowed, reason
def _send_zap_reply(self, request_id, status_code, status_text, user_id='user'):
"""Send a ZAP reply to finish the authentication."""
user_id = user_id if status_code == b'200' else b''
if isinstance(user_id, unicode):
user_id = user_id.encode(self.encoding, 'replace')
metadata = b'' # not currently used
self.log.debug("ZAP reply code=%s text=%s", status_code, status_text)
reply = [VERSION, request_id, status_code, status_text, user_id, metadata]
self.zap_socket.send_multipart(reply)
__all__ = ['Authenticator', 'CURVE_ALLOW_ANY']
|
40223247/2015test2-1 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/test/re_tests.py | 879 | #!/usr/bin/env python3
# -*- mode: python -*-
# Re test suite and benchmark suite v1.5
# The 3 possible outcomes for each pattern
[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
benchmarks = [
# test common prefix
('Python|Perl', 'Perl'), # Alternation
('(Python|Perl)', 'Perl'), # Grouped alternation
('Python|Perl|Tcl', 'Perl'), # Alternation
('(Python|Perl|Tcl)', 'Perl'), # Grouped alternation
('(Python)\\1', 'PythonPython'), # Backreference
('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # Disable the fastmap optimization
('([a-z][a-z0-9]*,)+', 'a5,b7,c9,'), # A few sets
('Python', 'Python'), # Simple text literal
('.*Python', 'Python'), # Bad text literal
('.*Python.*', 'Python'), # Worse text literal
('.*(Python)', 'Python'), # Bad text literal with grouping
]
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g99" contain the contents of each group, or the
# string 'None' if the group wasn't given a value, or the
# string 'Error' if the group index was out of range;
# also "groups", the return value of m.group() (a tuple).
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
tests = [
# Test ?P< and ?P= extensions
('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier
('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit
('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
# Same tests, for the ?P= form
('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group
('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
# Test octal escapes
('\\1', 'a', SYNTAX_ERROR), # Backreference
('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# Test \0 is handled everywhere
(r'\0', '\0', SUCCEED, 'found', '\0'),
(r'[\0a]', '\0', SUCCEED, 'found', '\0'),
(r'[a\0]', '\0', SUCCEED, 'found', '\0'),
(r'[^a\0]', '\0', FAIL),
# Test various letter escapes
(r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
(r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
# NOTE: not an error under PCRE/PRE:
# (r'\u', '', SYNTAX_ERROR), # A Perl escape
(r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ffffffffffffff', '\377', FAIL, 'found', chr(255)),
(r'\x00f', '\017', FAIL, 'found', chr(15)),
(r'\x00fe', '\376', FAIL, 'found', chr(254)),
# (r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
# (r'\x00f', '\017', SUCCEED, 'found', chr(15)),
# (r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
(r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
# Test that . only matches \n in DOTALL mode
('a.b', 'acb', SUCCEED, 'found', 'acb'),
('a.b', 'a\nb', FAIL),
('a.*b', 'acc\nccb', FAIL),
('a.{4,5}b', 'acc\nccb', FAIL),
('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
(')', '', SYNTAX_ERROR), # Unmatched right bracket
('', '', SUCCEED, 'found', ''), # Empty pattern
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found+"-"', '-'),
('$', 'abc', SUCCEED, 'found+"-"', '-'),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
# NOTE: not an error under PCRE/PRE:
# ('a[b-]', 'a-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('x\\b', 'xyz', FAIL),
('x\\B', 'xyz', SUCCEED, '"-"', '-'),
('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
('z\\B', 'xyz', FAIL),
('\\Bx', 'xyz', FAIL),
('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
('\\Ba\\B', '-a', FAIL, '"-"', '-'),
('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
('\\By\\B', 'xy', FAIL),
('\\By\\B', 'yz', FAIL),
('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
('^(a+).\\1$', 'aaaa', FAIL),
('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
('([abc]*)x', 'abc', FAIL),
('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
# Test symbolic groups
('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
# Test octal escapes/memory references
('\\1', 'a', SYNTAX_ERROR),
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# All tests from Perl
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab{1,}bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{4,5}bc', 'abbbbc', FAIL),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found', ''),
('$', 'abc', SUCCEED, 'found', ''),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-a]', '-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('*a', '-', SYNTAX_ERROR),
('(*)b', '-', SYNTAX_ERROR),
('$b', 'b', FAIL),
('a\\', '-', SYNTAX_ERROR),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a**', '-', SYNTAX_ERROR),
('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
# Python does not have the same rules for \\41 so this is a syntax error
# ('((((((((((a))))))))))\\41', 'aa', FAIL),
# ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'XBC', FAIL),
('(?i)abc', 'AXC', FAIL),
('(?i)abc', 'ABX', FAIL),
('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab+bc', 'ABC', FAIL),
('(?i)ab+bc', 'ABQ', FAIL),
('(?i)ab{1,}bc', 'ABQ', FAIL),
('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab??bc', 'ABBBBC', FAIL),
('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABCC', FAIL),
('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'AABC', FAIL),
('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
('(?i)^', 'ABC', SUCCEED, 'found', ''),
('(?i)$', 'ABC', SUCCEED, 'found', ''),
('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
('(?i)a.*c', 'AXYZD', FAIL),
('(?i)a[bc]d', 'ABC', FAIL),
('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
('(?i)a[b-d]e', 'ABD', FAIL),
('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-a]', '-', SYNTAX_ERROR),
('(?i)a[]b', '-', SYNTAX_ERROR),
('(?i)a[', '-', SYNTAX_ERROR),
('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
('(?i)a[^bc]d', 'ABD', FAIL),
('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)a[^-b]c', 'A-C', FAIL),
('(?i)a[^]b]c', 'A]C', FAIL),
('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)*a', '-', SYNTAX_ERROR),
('(?i)(*)b', '-', SYNTAX_ERROR),
('(?i)$b', 'B', FAIL),
('(?i)a\\', '-', SYNTAX_ERROR),
('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
('(?i)abc)', '-', SYNTAX_ERROR),
('(?i)(abc', '-', SYNTAX_ERROR),
('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a**', '-', SYNTAX_ERROR),
('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
('(?i))(', '-', SYNTAX_ERROR),
('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
('(?i)abc', '', FAIL),
('(?i)a*', '', SUCCEED, 'found', ''),
('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
('(?i)^(ab|cd)e', 'ABCDE', FAIL),
('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
#('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
#('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
('(?i)multiple words of text', 'UH-UH', FAIL),
('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
('(?i)[k]', 'AB', FAIL),
# ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
# ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
# lookbehind: split by : but not if it is escaped by -.
('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', SUCCEED, 'g1', 'bc-:de' ),
# escaping with \ as we know it
('(?<!\\\):(.*?)(?<!\\\):', 'a:bc\\:de:f', SUCCEED, 'g1', 'bc\\:de' ),
# terminating with ' and escaping with ? as in edifact
("(?<!\\?)'(.*?)(?<!\\?)'", "a'bc?'de'f", SUCCEED, 'g1', "bc?'de" ),
# Comments using the (?#...) syntax
('w(?# comment', 'w', SYNTAX_ERROR),
('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
# Check odd placement of embedded pattern modifiers
# not an error under PCRE/PRE:
('w(?i)', 'W', SUCCEED, 'found', 'W'),
# ('w(?i)', 'W', SYNTAX_ERROR),
# Comments using the x embedded pattern modifier
("""(?x)w# comment 1
x y
# comment 2
z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
# using the m embedded pattern modifier
('^abc', """jkl
abc
xyz""", FAIL),
('(?m)^abc', """jkl
abc
xyz""", SUCCEED, 'found', 'abc'),
('(?m)abc$', """jkl
xyzabc
123""", SUCCEED, 'found', 'abc'),
# using the s embedded pattern modifier
('a.b', 'a\nb', FAIL),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
# test \w, etc. both inside and outside character classes
('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
# not an error under PCRE/PRE:
# ('[\\d-x]', '-', SYNTAX_ERROR),
(r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ff', '\377', FAIL),
# (r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
(r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
(r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
(r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
#
# post-1.5.2 additions
# xmllib problem
(r'(([a-z]+):)?([a-z]+)$', 'smil', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-smil'),
# bug 110866: reference to undefined group
(r'((.)\1+)', '', SYNTAX_ERROR),
# bug 111869: search (PRE/PCRE fails on this one, SRE doesn't)
(r'.*d', 'abc\nabd', SUCCEED, 'found', 'abd'),
# bug 112468: various expected syntax errors
(r'(', '', SYNTAX_ERROR),
(r'[\41]', '!', SUCCEED, 'found', '!'),
# bug 114033: nothing to repeat
(r'(x?)?', 'x', SUCCEED, 'found', 'x'),
# bug 115040: rescan if flags are modified inside pattern
(r' (?x)foo ', 'foo', SUCCEED, 'found', 'foo'),
# bug 115618: negative lookbehind
(r'(?<!abc)(d.f)', 'abcdefdof', SUCCEED, 'found', 'dof'),
# bug 116251: character class bug
(r'[\w-]+', 'laser_beam', SUCCEED, 'found', 'laser_beam'),
# bug 123769+127259: non-greedy backtracking bug
(r'.*?\S *:', 'xx:', SUCCEED, 'found', 'xx:'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
# bug 127259: \Z shouldn't depend on multiline mode
(r'(?ms).*?x\s*\Z(.*)','xx\nx\n', SUCCEED, 'g1', ''),
# bug 128899: uppercase literals under the ignorecase flag
(r'(?i)M+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)m+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[M]+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[m]+', 'MMM', SUCCEED, 'found', 'MMM'),
# bug 130748: ^* should be an error (nothing to repeat)
(r'^*', '', SYNTAX_ERROR),
# bug 133283: minimizing repeat problem
(r'"(?:\\"|[^"])*?"', r'"\""', SUCCEED, 'found', r'"\""'),
# bug 477728: minimizing repeat problem
(r'^.*?$', 'one\ntwo\nthree\n', FAIL),
# bug 483789: minimizing repeat problem
(r'a[^>]*?b', 'a>b', FAIL),
# bug 490573: minimizing repeat problem
(r'^a*?$', 'foo', FAIL),
# bug 470582: nested groups problem
(r'^((a)c)?(ab)$', 'ab', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-ab'),
# another minimizing repeat problem (capturing groups in assertions)
('^([ab]*?)(?=(b)?)c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?!(b))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?<!(a))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
]
u = '\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'
tests.extend([
# bug 410271: \b broken under locales
(r'\b.\b', 'a', SUCCEED, 'found', 'a'),
(r'(?u)\b.\b', u, SUCCEED, 'found', u),
(r'(?u)\w', u, SUCCEED, 'found', u),
])
|
jaggu303619/asylum | refs/heads/master | openerp/report/render/odt2odt/__init__.py | 77 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odt2odt import parseNode
#.apidoc title: ODT to ODT engine
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
heeraj123/oh-mainline | refs/heads/master | vendor/packages/celery/funtests/setup.py | 19 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
from setuptools.command.install import install
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
from setuptools.command.install import install
import os
import sys
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.path.join(os.getcwd(), os.pardir))
import suite
class no_install(install):
def run(self, *args, **kwargs):
import sys
sys.stderr.write("""
------------------------------------------------------
The Celery functional test suite cannot be installed.
------------------------------------------------------
But you can execute the tests by running the command:
$ python setup.py test
""")
setup(
name='celery-funtests',
version="DEV",
description="Functional test suite for Celery",
author="Ask Solem",
author_email="[email protected]",
url="http://github.com/ask/celery",
platforms=["any"],
packages=[],
data_files=[],
zip_safe=False,
cmdclass={"install": no_install},
test_suite="nose.collector",
tests_require=[
"unittest2>=0.4.0",
"simplejson",
"nose",
"pytyrant",
"redis",
"pymongo",
],
classifiers=[
"Operating System :: OS Independent",
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
],
long_description="Do not install this package",
)
|
hsfzxjy/wisecitymbc | refs/heads/master | site_packages/bs4/diagnose.py | 431 | """Diagnostic functions, mainly for use when doing tech support."""
import cProfile
from StringIO import StringIO
from HTMLParser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys
import cProfile
def diagnose(data):
"""Diagnostic suite for isolating common problems."""
print "Diagnostic running on Beautiful Soup %s" % __version__
print "Python version %s" % sys.version
basic_parsers = ["html.parser", "html5lib", "lxml"]
for name in basic_parsers:
for builder in builder_registry.builders:
if name in builder.features:
break
else:
basic_parsers.remove(name)
print (
"I noticed that %s is not installed. Installing it may help." %
name)
if 'lxml' in basic_parsers:
basic_parsers.append(["lxml", "xml"])
from lxml import etree
print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))
if 'html5lib' in basic_parsers:
import html5lib
print "Found html5lib version %s" % html5lib.__version__
if hasattr(data, 'read'):
data = data.read()
elif os.path.exists(data):
print '"%s" looks like a filename. Reading data from the file.' % data
data = open(data).read()
elif data.startswith("http:") or data.startswith("https:"):
print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data
print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup."
return
print
for parser in basic_parsers:
print "Trying to parse your markup with %s" % parser
success = False
try:
soup = BeautifulSoup(data, parser)
success = True
except Exception, e:
print "%s could not parse the markup." % parser
traceback.print_exc()
if success:
print "Here's what %s did with the markup:" % parser
print soup.prettify()
print "-" * 80
def lxml_trace(data, html=True, **kwargs):
"""Print out the lxml events that occur during parsing.
This lets you see how lxml parses a document when no Beautiful
Soup code is running.
"""
from lxml import etree
for event, element in etree.iterparse(StringIO(data), html=html, **kwargs):
print("%s, %4s, %s" % (event, element.tag, element.text))
class AnnouncingParser(HTMLParser):
"""Announces HTMLParser parse events, without doing anything else."""
def _p(self, s):
print(s)
def handle_starttag(self, name, attrs):
self._p("%s START" % name)
def handle_endtag(self, name):
self._p("%s END" % name)
def handle_data(self, data):
self._p("%s DATA" % data)
def handle_charref(self, name):
self._p("%s CHARREF" % name)
def handle_entityref(self, name):
self._p("%s ENTITYREF" % name)
def handle_comment(self, data):
self._p("%s COMMENT" % data)
def handle_decl(self, data):
self._p("%s DECL" % data)
def unknown_decl(self, data):
self._p("%s UNKNOWN-DECL" % data)
def handle_pi(self, data):
self._p("%s PI" % data)
def htmlparser_trace(data):
"""Print out the HTMLParser events that occur during parsing.
This lets you see how HTMLParser parses a document when no
Beautiful Soup code is running.
"""
parser = AnnouncingParser()
parser.feed(data)
_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"
def rword(length=5):
"Generate a random word-like string."
s = ''
for i in range(length):
if i % 2 == 0:
t = _consonants
else:
t = _vowels
s += random.choice(t)
return s
def rsentence(length=4):
"Generate a random sentence-like string."
return " ".join(rword(random.randint(4,9)) for i in range(length))
def rdoc(num_elements=1000):
"""Randomly generate an invalid HTML document."""
tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
elements = []
for i in range(num_elements):
choice = random.randint(0,3)
if choice == 0:
# New tag.
tag_name = random.choice(tag_names)
elements.append("<%s>" % tag_name)
elif choice == 1:
elements.append(rsentence(random.randint(1,4)))
elif choice == 2:
# Close a tag.
tag_name = random.choice(tag_names)
elements.append("</%s>" % tag_name)
return "<html>" + "\n".join(elements) + "</html>"
def benchmark_parsers(num_elements=100000):
"""Very basic head-to-head performance benchmark."""
print "Comparative parser benchmark on Beautiful Soup %s" % __version__
data = rdoc(num_elements)
print "Generated a large invalid HTML document (%d bytes)." % len(data)
for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
success = False
try:
a = time.time()
soup = BeautifulSoup(data, parser)
b = time.time()
success = True
except Exception, e:
print "%s could not parse the markup." % parser
traceback.print_exc()
if success:
print "BS4+%s parsed the markup in %.2fs." % (parser, b-a)
from lxml import etree
a = time.time()
etree.HTML(data)
b = time.time()
print "Raw lxml parsed the markup in %.2fs." % (b-a)
import html5lib
parser = html5lib.HTMLParser()
a = time.time()
parser.parse(data)
b = time.time()
print "Raw html5lib parsed the markup in %.2fs." % (b-a)
def profile(num_elements=100000, parser="lxml"):
filehandle = tempfile.NamedTemporaryFile()
filename = filehandle.name
data = rdoc(num_elements)
vars = dict(bs4=bs4, data=data, parser=parser)
cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename)
stats = pstats.Stats(filename)
# stats.strip_dirs()
stats.sort_stats("cumulative")
stats.print_stats('_html5lib|bs4', 50)
if __name__ == '__main__':
diagnose(sys.stdin.read())
|
kbdick/RecycleTracker | refs/heads/gh-pages | recyclecollector/scrap/gdata-2.0.18/build/lib.linux-x86_64-2.7/gdata/tlslite/integration/TLSTwistedProtocolWrapper.py | 237 | """TLS Lite + Twisted."""
from twisted.protocols.policies import ProtocolWrapper, WrappingFactory
from twisted.python.failure import Failure
from AsyncStateMachine import AsyncStateMachine
from gdata.tlslite.TLSConnection import TLSConnection
from gdata.tlslite.errors import *
import socket
import errno
#The TLSConnection is created around a "fake socket" that
#plugs it into the underlying Twisted transport
class _FakeSocket:
def __init__(self, wrapper):
self.wrapper = wrapper
self.data = ""
def send(self, data):
ProtocolWrapper.write(self.wrapper, data)
return len(data)
def recv(self, numBytes):
if self.data == "":
raise socket.error, (errno.EWOULDBLOCK, "")
returnData = self.data[:numBytes]
self.data = self.data[numBytes:]
return returnData
class TLSTwistedProtocolWrapper(ProtocolWrapper, AsyncStateMachine):
"""This class can wrap Twisted protocols to add TLS support.
Below is a complete example of using TLS Lite with a Twisted echo
server.
There are two server implementations below. Echo is the original
protocol, which is oblivious to TLS. Echo1 subclasses Echo and
negotiates TLS when the client connects. Echo2 subclasses Echo and
negotiates TLS when the client sends "STARTTLS"::
from twisted.internet.protocol import Protocol, Factory
from twisted.internet import reactor
from twisted.protocols.policies import WrappingFactory
from twisted.protocols.basic import LineReceiver
from twisted.python import log
from twisted.python.failure import Failure
import sys
from tlslite.api import *
s = open("./serverX509Cert.pem").read()
x509 = X509()
x509.parse(s)
certChain = X509CertChain([x509])
s = open("./serverX509Key.pem").read()
privateKey = parsePEMKey(s, private=True)
verifierDB = VerifierDB("verifierDB")
verifierDB.open()
class Echo(LineReceiver):
def connectionMade(self):
self.transport.write("Welcome to the echo server!\\r\\n")
def lineReceived(self, line):
self.transport.write(line + "\\r\\n")
class Echo1(Echo):
def connectionMade(self):
if not self.transport.tlsStarted:
self.transport.setServerHandshakeOp(certChain=certChain,
privateKey=privateKey,
verifierDB=verifierDB)
else:
Echo.connectionMade(self)
def connectionLost(self, reason):
pass #Handle any TLS exceptions here
class Echo2(Echo):
def lineReceived(self, data):
if data == "STARTTLS":
self.transport.setServerHandshakeOp(certChain=certChain,
privateKey=privateKey,
verifierDB=verifierDB)
else:
Echo.lineReceived(self, data)
def connectionLost(self, reason):
pass #Handle any TLS exceptions here
factory = Factory()
factory.protocol = Echo1
#factory.protocol = Echo2
wrappingFactory = WrappingFactory(factory)
wrappingFactory.protocol = TLSTwistedProtocolWrapper
log.startLogging(sys.stdout)
reactor.listenTCP(1079, wrappingFactory)
reactor.run()
This class works as follows:
Data comes in and is given to the AsyncStateMachine for handling.
AsyncStateMachine will forward events to this class, and we'll
    pass them on to the ProtocolWrapper, which will proxy them to the
wrapped protocol. The wrapped protocol may then call back into
this class, and these calls will be proxied into the
AsyncStateMachine.
The call graph looks like this:
- self.dataReceived
- AsyncStateMachine.inReadEvent
- self.out(Connect|Close|Read)Event
- ProtocolWrapper.(connectionMade|loseConnection|dataReceived)
- self.(loseConnection|write|writeSequence)
- AsyncStateMachine.(setCloseOp|setWriteOp)
"""
#WARNING: IF YOU COPY-AND-PASTE THE ABOVE CODE, BE SURE TO REMOVE
#THE EXTRA ESCAPING AROUND "\\r\\n"
def __init__(self, factory, wrappedProtocol):
ProtocolWrapper.__init__(self, factory, wrappedProtocol)
AsyncStateMachine.__init__(self)
self.fakeSocket = _FakeSocket(self)
self.tlsConnection = TLSConnection(self.fakeSocket)
self.tlsStarted = False
self.connectionLostCalled = False
def connectionMade(self):
try:
ProtocolWrapper.connectionMade(self)
except TLSError, e:
self.connectionLost(Failure(e))
ProtocolWrapper.loseConnection(self)
def dataReceived(self, data):
try:
if not self.tlsStarted:
ProtocolWrapper.dataReceived(self, data)
else:
self.fakeSocket.data += data
while self.fakeSocket.data:
AsyncStateMachine.inReadEvent(self)
except TLSError, e:
self.connectionLost(Failure(e))
ProtocolWrapper.loseConnection(self)
def connectionLost(self, reason):
if not self.connectionLostCalled:
ProtocolWrapper.connectionLost(self, reason)
self.connectionLostCalled = True
def outConnectEvent(self):
ProtocolWrapper.connectionMade(self)
def outCloseEvent(self):
ProtocolWrapper.loseConnection(self)
def outReadEvent(self, data):
if data == "":
ProtocolWrapper.loseConnection(self)
else:
ProtocolWrapper.dataReceived(self, data)
def setServerHandshakeOp(self, **args):
self.tlsStarted = True
AsyncStateMachine.setServerHandshakeOp(self, **args)
def loseConnection(self):
if not self.tlsStarted:
ProtocolWrapper.loseConnection(self)
else:
AsyncStateMachine.setCloseOp(self)
def write(self, data):
if not self.tlsStarted:
ProtocolWrapper.write(self, data)
else:
#Because of the FakeSocket, write operations are guaranteed to
#terminate immediately.
AsyncStateMachine.setWriteOp(self, data)
def writeSequence(self, seq):
if not self.tlsStarted:
ProtocolWrapper.writeSequence(self, seq)
else:
#Because of the FakeSocket, write operations are guaranteed to
#terminate immediately.
AsyncStateMachine.setWriteOp(self, "".join(seq))
|
40223232/2015cd_midterm2 | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/keyword.py | 761 | #! /usr/bin/env python3
"""Keywords (from "graminit.c")
This file is automatically generated; please don't muck it up!
To update the symbols in this file, 'cd' to the top directory of
the python source tree after building the interpreter and run:
./python Lib/keyword.py
"""
__all__ = ["iskeyword", "kwlist"]
kwlist = [
#--start keywords--
'False',
'None',
'True',
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'nonlocal',
'not',
'or',
'pass',
'raise',
'return',
'try',
'while',
'with',
'yield',
#--end keywords--
]
iskeyword = frozenset(kwlist).__contains__
def main():
import sys, re
args = sys.argv[1:]
iptfile = args and args[0] or "Python/graminit.c"
if len(args) > 1: optfile = args[1]
else: optfile = "Lib/keyword.py"
# scan the source file for keywords
with open(iptfile) as fp:
strprog = re.compile('"([^"]+)"')
lines = []
for line in fp:
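            # keyword entries in graminit.c look like: {1, "while"},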
if '{1, "' in line:
match = strprog.search(line)
if match:
lines.append(" '" + match.group(1) + "',\n")
lines.sort()
# load the output skeleton from the target
with open(optfile) as fp:
format = fp.readlines()
# insert the lines of keywords
try:
start = format.index("#--start keywords--\n") + 1
end = format.index("#--end keywords--\n")
format[start:end] = lines
except ValueError:
sys.stderr.write("target does not contain format markers\n")
sys.exit(1)
# write the output file
fp = open(optfile, 'w')
fp.write(''.join(format))
fp.close()
if __name__ == "__main__":
main()
|
kidaa/aurora | refs/heads/master | src/main/python/apache/aurora/client/api/scheduler_client.py | 4 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import threading
import time
import traceback
from pystachio import Default, Integer, String
from thrift.protocol import TJSONProtocol
from thrift.transport import TTransport
from twitter.common import log
from twitter.common.concurrent import Timeout, deadline
from twitter.common.quantity import Amount, Time
from twitter.common.zookeeper.kazoo_client import TwitterKazooClient
from twitter.common.zookeeper.serverset import ServerSet
from apache.aurora.common.auth.auth_module_manager import (
SessionKeyError,
get_auth_handler,
make_session_key
)
from apache.aurora.common.cluster import Cluster
from apache.aurora.common.transport import TRequestsTransport
from gen.apache.aurora.api import AuroraAdmin, ReadOnlyScheduler
from gen.apache.aurora.api.constants import THRIFT_API_VERSION
from gen.apache.aurora.api.ttypes import ResponseCode
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
class SchedulerClientTrait(Cluster.Trait):
zk = String # noqa
zk_port = Default(Integer, 2181) # noqa
scheduler_zk_path = String # noqa
scheduler_uri = String # noqa
proxy_url = String # noqa
auth_mechanism = Default(String, 'UNAUTHENTICATED') # noqa
class SchedulerClient(object):
THRIFT_RETRIES = 5
RETRY_TIMEOUT = Amount(1, Time.SECONDS)
class Error(Exception): pass
class CouldNotConnect(Error): pass
# TODO(wickman) Refactor per MESOS-3005 into two separate classes with separate traits:
# ZookeeperClientTrait
# DirectClientTrait
@classmethod
def get(cls, cluster, auth_factory=get_auth_handler, **kwargs):
if not isinstance(cluster, Cluster):
raise TypeError('"cluster" must be an instance of Cluster, got %s' % type(cluster))
cluster = cluster.with_trait(SchedulerClientTrait)
auth_handler = auth_factory(cluster.auth_mechanism)
if cluster.zk:
return ZookeeperSchedulerClient(cluster, port=cluster.zk_port, auth=auth_handler, **kwargs)
elif cluster.scheduler_uri:
return DirectSchedulerClient(cluster.scheduler_uri, auth=auth_handler, **kwargs)
else:
raise ValueError('"cluster" does not specify zk or scheduler_uri')
def __init__(self, auth, user_agent, verbose=False):
self._client = None
self._auth = auth
self._user_agent = user_agent
self._verbose = verbose
def get_thrift_client(self):
if self._client is None:
self._client = self._connect()
return self._client
# per-class implementation -- mostly meant to set up a valid host/port
# pair and then delegate the opening to SchedulerClient._connect_scheduler
def _connect(self):
return None
def _connect_scheduler(self, uri, clock=time):
transport = TRequestsTransport(uri, auth=self._auth, user_agent=self._user_agent)
protocol = TJSONProtocol.TJSONProtocol(transport)
schedulerClient = AuroraAdmin.Client(protocol)
for _ in range(self.THRIFT_RETRIES):
try:
transport.open()
return schedulerClient
except TTransport.TTransportException:
clock.sleep(self.RETRY_TIMEOUT.as_(Time.SECONDS))
continue
except Exception as e:
# Monkey-patched proxies, like socks, can generate a proxy error here.
# without adding a dependency, we can't catch those in a more specific way.
raise self.CouldNotConnect('Connection to scheduler failed: %s' % e)
raise self.CouldNotConnect('Could not connect to %s' % uri)
class ZookeeperSchedulerClient(SchedulerClient):
SERVERSET_TIMEOUT = Amount(10, Time.SECONDS)
@classmethod
def get_scheduler_serverset(cls, cluster, port=2181, verbose=False, **kw):
if cluster.zk is None:
raise ValueError('Cluster has no associated zookeeper ensemble!')
if cluster.scheduler_zk_path is None:
raise ValueError('Cluster has no defined scheduler path, must specify scheduler_zk_path '
'in your cluster config!')
hosts = [h + ':{p}' for h in cluster.zk.split(',')]
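    # ':{p}' is a placeholder; the zookeeper port is substituted below via .format(p=port).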
zk = TwitterKazooClient.make(str(','.join(hosts).format(p=port)), verbose=verbose)
return zk, ServerSet(zk, cluster.scheduler_zk_path, **kw)
def __init__(self, cluster, port=2181, verbose=False, _deadline=deadline, **kwargs):
SchedulerClient.__init__(self, verbose=verbose, **kwargs)
self._cluster = cluster
self._zkport = port
self._endpoint = None
self._uri = None
self._deadline = _deadline
def _resolve(self):
"""Resolve the uri associated with this scheduler from zookeeper."""
joined = threading.Event()
def on_join(elements):
joined.set()
zk, serverset = self.get_scheduler_serverset(self._cluster, verbose=self._verbose,
port=self._zkport, on_join=on_join)
joined.wait(timeout=self.SERVERSET_TIMEOUT.as_(Time.SECONDS))
try:
# Need to perform this operation in a separate thread, because kazoo will wait for the
# result of this serverset evaluation indefinitely, which will prevent people killing
# the client with keyboard interrupts.
serverset_endpoints = self._deadline(lambda: list(serverset),
timeout=self.SERVERSET_TIMEOUT.as_(Time.SECONDS), daemon=True, propagate=True)
except Timeout:
raise self.CouldNotConnect("Failed to connect to Zookeeper within %d seconds." %
self.SERVERSET_TIMEOUT.as_(Time.SECONDS))
if len(serverset_endpoints) == 0:
raise self.CouldNotConnect('No schedulers detected in %s!' % self._cluster.name)
instance = serverset_endpoints[0]
if 'https' in instance.additional_endpoints:
endpoint = instance.additional_endpoints['https']
self._uri = 'https://%s:%s' % (endpoint.host, endpoint.port)
elif 'http' in instance.additional_endpoints:
endpoint = instance.additional_endpoints['http']
self._uri = 'http://%s:%s' % (endpoint.host, endpoint.port)
zk.stop()
def _connect(self):
if self._uri is None:
self._resolve()
if self._uri is not None:
return self._connect_scheduler(urljoin(self._uri, 'api'))
@property
def url(self):
proxy_url = self._cluster.proxy_url
if proxy_url:
return proxy_url
return self.raw_url
@property
def raw_url(self):
if self._uri is None:
self._resolve()
if self._uri:
return self._uri
class DirectSchedulerClient(SchedulerClient):
def __init__(self, uri, verbose=True, **kwargs):
SchedulerClient.__init__(self, verbose=verbose, **kwargs)
self._uri = uri
def _connect(self):
return self._connect_scheduler(urljoin(self._uri, 'api'))
@property
def url(self):
return self._uri
@property
def raw_url(self):
return self._uri
class SchedulerProxy(object):
"""
This class is responsible for creating a reliable thrift client to the
  twitter scheduler. It does basically all the dirty work needed by the
  AuroraClientAPI.
"""
CONNECT_MAXIMUM_WAIT = Amount(1, Time.MINUTES)
RPC_RETRY_INTERVAL = Amount(5, Time.SECONDS)
RPC_MAXIMUM_WAIT = Amount(10, Time.MINUTES)
class Error(Exception): pass
class TimeoutError(Error): pass
class TransientError(Error): pass
class AuthError(Error): pass
class APIVersionError(Error): pass
class ThriftInternalError(Error): pass
def __init__(self, cluster, verbose=False, session_key_factory=make_session_key, **kwargs):
"""A callable session_key_factory should be provided for authentication"""
self.cluster = cluster
# TODO(Sathya): Make this a part of cluster trait when authentication is pushed to the transport
# layer.
self._session_key_factory = session_key_factory
self._client = self._scheduler_client = None
self.verbose = verbose
self._lock = threading.RLock()
self._terminating = threading.Event()
self._kwargs = kwargs
def with_scheduler(method):
"""Decorator magic to make sure a connection is made to the scheduler"""
def _wrapper(self, *args, **kwargs):
if not self._scheduler_client:
self._construct_scheduler()
return method(self, *args, **kwargs)
return _wrapper
def invalidate(self):
self._client = self._scheduler_client = None
def terminate(self):
"""Requests immediate termination of any retry attempts and invalidates client."""
self._terminating.set()
self.invalidate()
@with_scheduler
def client(self):
return self._client
@with_scheduler
def scheduler_client(self):
return self._scheduler_client
def session_key(self):
try:
return self._session_key_factory(self.cluster.auth_mechanism)
except SessionKeyError as e:
raise self.AuthError('Unable to create session key %s' % e)
def _construct_scheduler(self):
"""
Populates:
self._scheduler_client
self._client
"""
self._scheduler_client = SchedulerClient.get(self.cluster, verbose=self.verbose, **self._kwargs)
assert self._scheduler_client, "Could not find scheduler (cluster = %s)" % self.cluster.name
start = time.time()
while (time.time() - start) < self.CONNECT_MAXIMUM_WAIT.as_(Time.SECONDS):
try:
# this can wind up generating any kind of error, because it turns into
# a call to a dynamically set authentication module.
self._client = self._scheduler_client.get_thrift_client()
break
except SchedulerClient.CouldNotConnect as e:
log.warning('Could not connect to scheduler: %s' % e)
except Exception as e:
# turn any auth module exception into an auth error.
log.debug('Warning: got an unknown exception during authentication:')
log.debug(traceback.format_exc())
raise self.AuthError('Error connecting to scheduler: %s' % e)
if not self._client:
raise self.TimeoutError('Timed out trying to connect to scheduler at %s' % self.cluster.name)
def __getattr__(self, method_name):
# If the method does not exist, getattr will return AttributeError for us.
method = getattr(AuroraAdmin.Client, method_name)
if not callable(method):
return method
@functools.wraps(method)
def method_wrapper(*args):
with self._lock:
start = time.time()
while not self._terminating.is_set() and (
time.time() - start) < self.RPC_MAXIMUM_WAIT.as_(Time.SECONDS):
# Only automatically append a SessionKey if this is not part of the read-only API.
auth_args = () if hasattr(ReadOnlyScheduler.Iface, method_name) else (self.session_key(),)
try:
method = getattr(self.client(), method_name)
if not callable(method):
return method
resp = method(*(args + auth_args))
if resp is not None and resp.responseCode == ResponseCode.ERROR_TRANSIENT:
raise self.TransientError(", ".join(
[m.message for m in resp.details] if resp.details else []))
if resp.serverInfo.thriftAPIVersion != THRIFT_API_VERSION:
raise self.APIVersionError("Client Version: %s, Server Version: %s" %
(THRIFT_API_VERSION, resp.serverInfo.thriftAPIVersion))
return resp
except TRequestsTransport.AuthError as e:
raise self.AuthError(e)
except (TTransport.TTransportException, self.TimeoutError, self.TransientError) as e:
if not self._terminating.is_set():
log.warning('Connection error with scheduler: %s, reconnecting...' % e)
self.invalidate()
self._terminating.wait(self.RPC_RETRY_INTERVAL.as_(Time.SECONDS))
except Exception as e:
# Take any error that occurs during the RPC call, and transform it
# into something clients can handle.
if not self._terminating.is_set():
raise self.ThriftInternalError("Error during thrift call %s to %s: %s" %
(method_name, self.cluster.name, e))
if not self._terminating.is_set():
raise self.TimeoutError('Timed out attempting to issue %s to %s' % (
method_name, self.cluster.name))
return method_wrapper
|
ej2/pixelpuncher | refs/heads/master | pixelpuncher/player/migrations/0007_auto_20151230_2157.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 21:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('player', '0006_auto_20151228_0324'),
]
operations = [
migrations.RemoveField(
model_name='skill',
name='critial_percentage',
),
migrations.RemoveField(
model_name='skill',
name='fail_percentage',
),
migrations.RemoveField(
model_name='skill',
name='maximum_damage',
),
migrations.RemoveField(
model_name='skill',
name='minimum_damage',
),
migrations.AddField(
model_name='playerskill',
name='bonus',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='playerskill',
name='critical_multipler',
field=models.DecimalField(decimal_places=2, default=2.0, max_digits=4),
),
migrations.AddField(
model_name='playerskill',
name='critical_percentage',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='playerskill',
name='dice_sides',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='playerskill',
name='energy_cost',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='playerskill',
name='hit_percentage',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='playerskill',
name='number_of_dice',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='bonus',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='critical_multipler',
field=models.DecimalField(decimal_places=2, default=2.0, max_digits=4),
),
migrations.AddField(
model_name='skill',
name='critical_percentage',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='dice_sides',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='gain_frequency',
field=models.IntegerField(default=1),
),
migrations.AddField(
model_name='skill',
name='gained_critical',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='gained_critical_multipler',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='gained_energy_cost',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='gained_hit',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='hit_percentage',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='number_of_dice',
field=models.IntegerField(default=1),
),
migrations.AddField(
model_name='skill',
name='skill_type',
field=models.CharField(choices=[(b'ATTK', b'Attack'), (b'SPCL', b'Special'), (b'HEAL', b'Heal')], default='ATTK', max_length=4),
preserve_default=False,
),
migrations.AddField(
model_name='skill',
name='special_type',
field=models.CharField(blank=True, choices=[(b'energygain', b'Energy Gain'), (b'healthgain', b'Health Gain')], max_length=12, null=True),
),
migrations.AddField(
model_name='skill',
name='special_value_1',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='special_value_2',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='skill',
name='special_value_3',
field=models.IntegerField(default=0),
),
]
|
illicitonion/givabit | refs/heads/master | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_1_2/django/conf/locale/hi/__init__.py | 12133432 | |
arrabito/DIRAC | refs/heads/integration | Workflow/__init__.py | 12133432 | |
maelnor/nova | refs/heads/master | nova/tests/virt/disk/__init__.py | 12133432 | |
elieux/pacman | refs/heads/master | test/pacman/tests/remove001.py | 12 | self.description = "Remove a package listed 5 times"
p = pmpkg("foo")
self.addpkg2db("local", p)
self.args = "-R " + "foo "*5
self.addrule("PACMAN_RETCODE=0")
self.addrule("!PKG_EXIST=foo")
|
maxive/erp | refs/heads/master | addons/auth_signup/models/res_partner.py | 2 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import random
import werkzeug.urls
from collections import defaultdict
from datetime import datetime, timedelta
from odoo import api, exceptions, fields, models, _
class SignupError(Exception):
pass
def random_token():
# the token has an entropy of about 120 bits (6 bits/char * 20 chars)
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
return ''.join(random.SystemRandom().choice(chars) for _ in range(20))
def now(**kwargs):
dt = datetime.now() + timedelta(**kwargs)
return fields.Datetime.to_string(dt)
class ResPartner(models.Model):
_inherit = 'res.partner'
signup_token = fields.Char(copy=False)
signup_type = fields.Char(string='Signup Token Type', copy=False)
signup_expiration = fields.Datetime(copy=False)
signup_valid = fields.Boolean(compute='_compute_signup_valid', string='Signup Token is Valid')
signup_url = fields.Char(compute='_compute_signup_url', string='Signup URL')
@api.multi
def _compute_signup_valid(self):
dt = now()
for partner in self:
partner.signup_valid = bool(partner.signup_token) and \
(not partner.signup_expiration or dt <= partner.signup_expiration)
@api.multi
def _compute_signup_url(self):
""" proxy for function field towards actual implementation """
result = self._get_signup_url_for_action()
for partner in self:
partner.signup_url = result.get(partner.id, False)
@api.multi
def _get_signup_url_for_action(self, action=None, view_type=None, menu_id=None, res_id=None, model=None):
""" generate a signup url for the given partner ids and action, possibly overriding
the url state components (menu_id, id, view_type) """
res = dict.fromkeys(self.ids, False)
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
for partner in self:
# when required, make sure the partner has a valid signup token
if self.env.context.get('signup_valid') and not partner.user_ids:
partner.signup_prepare()
route = 'login'
# the parameters to encode for the query
query = dict(db=self.env.cr.dbname)
signup_type = self.env.context.get('signup_force_type_in_url', partner.signup_type or '')
if signup_type:
route = 'reset_password' if signup_type == 'reset' else signup_type
if partner.signup_token and signup_type:
query['token'] = partner.signup_token
elif partner.user_ids:
query['login'] = partner.user_ids[0].login
else:
continue # no signup token, no user, thus no signup url!
fragment = dict()
base = '/web#'
if action == '/mail/view':
base = '/mail/view?'
elif action:
fragment['action'] = action
if view_type:
fragment['view_type'] = view_type
if menu_id:
fragment['menu_id'] = menu_id
if model:
fragment['model'] = model
if res_id:
fragment['res_id'] = res_id
if fragment:
query['redirect'] = base + werkzeug.urls.url_encode(fragment)
res[partner.id] = werkzeug.urls.url_join(base_url, "/web/%s?%s" % (route, werkzeug.urls.url_encode(query)))
return res
@api.multi
def action_signup_prepare(self):
return self.signup_prepare()
def signup_get_auth_param(self):
""" Get a signup token related to the partner if signup is enabled.
If the partner already has a user, get the login parameter.
"""
res = defaultdict(dict)
allow_signup = self.env['ir.config_parameter'].sudo().get_param('auth_signup.invitation_scope', 'b2b') == 'b2c'
for partner in self:
if allow_signup and not partner.user_ids:
partner.signup_prepare()
res[partner.id]['auth_signup_token'] = partner.signup_token
elif partner.user_ids:
res[partner.id]['auth_login'] = partner.user_ids[0].login
return res
@api.multi
def signup_cancel(self):
return self.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
@api.multi
def signup_prepare(self, signup_type="signup", expiration=False):
""" generate a new token for the partners with the given validity, if necessary
:param expiration: the expiration datetime of the token (string, optional)
"""
for partner in self:
if expiration or not partner.signup_valid:
token = random_token()
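                # regenerate on the (unlikely) chance the token collides with an existing one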
while self._signup_retrieve_partner(token):
token = random_token()
partner.write({'signup_token': token, 'signup_type': signup_type, 'signup_expiration': expiration})
return True
@api.model
def _signup_retrieve_partner(self, token, check_validity=False, raise_exception=False):
""" find the partner corresponding to a token, and possibly check its validity
:param token: the token to resolve
:param check_validity: if True, also check validity
:param raise_exception: if True, raise exception instead of returning False
:return: partner (browse record) or False (if raise_exception is False)
"""
partner = self.search([('signup_token', '=', token)], limit=1)
if not partner:
if raise_exception:
raise exceptions.UserError(_("Signup token '%s' is not valid") % token)
return False
if check_validity and not partner.signup_valid:
if raise_exception:
raise exceptions.UserError(_("Signup token '%s' is no longer valid") % token)
return False
return partner
@api.model
def signup_retrieve_info(self, token):
""" retrieve the user info about the token
:return: a dictionary with the user information:
- 'db': the name of the database
- 'token': the token, if token is valid
- 'name': the name of the partner, if token is valid
- 'login': the user login, if the user already exists
- 'email': the partner email, if the user does not exist
"""
partner = self._signup_retrieve_partner(token, raise_exception=True)
res = {'db': self.env.cr.dbname}
if partner.signup_valid:
res['token'] = token
res['name'] = partner.name
if partner.user_ids:
res['login'] = partner.user_ids[0].login
else:
res['email'] = res['login'] = partner.email or ''
return res
|
michaelgallacher/intellij-community | refs/heads/master | python/testData/optimizeImports/disableAlphabeticalOrder.after.py | 5 | from __future__ import unicode_literals
from __future__ import absolute_import
import sys
from datetime import timedelta
import z
import b
import a
from a import C1
from alphabet import D
from b import func
from
import foo # broken
import # broken
from alphabet import *
from alphabet import C
from alphabet import B, A
from . import m1
from .. import m2
from .pkg import m3
from . import m4, m5
print(z, b, a, C1, func, sys, abc, foo, timedelta, A, B, C, D, m1, m2, m3, m4, m5) |
colede/qtcreator | refs/heads/master | tests/system/suite_APTW/tst_APTW01/test.py | 3 | #############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
# test New Qt Gui Application build and run for release and debug option
def main():
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
checkedTargets = createProject_Qt_GUI(tempDir(), "SampleApp")
# run project for debug and release and verify results
runVerify(checkedTargets)
#close Qt Creator
invokeMenuItem("File", "Exit")
|
Juanlu001/PyFME | refs/heads/master | examples/example_002_stationary_descent.py | 1 | # -*- coding: utf-8 -*-
"""
Python Flight Mechanics Engine (PyFME).
Copyright (c) AeroPython Development Team.
Distributed under the terms of the MIT License.
Example
-------
Cessna 172, ISA1976 integrated with Flat Earth (Euler angles).
Example with trimmed aircraft: stationary descent, symmetric, wings level
flight.
The main purpose of this example is to check if the aircraft trimmed in a given
state maintains the trimmed flight condition.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pyfme.aircrafts import Cessna172
from pyfme.environment.environment import Environment
from pyfme.environment.atmosphere import ISA1976
from pyfme.environment.gravity import VerticalConstant
from pyfme.environment.wind import NoWind
from pyfme.models.systems import EulerFlatEarth
from pyfme.simulator import BatchSimulation
from pyfme.utils.trimmer import steady_state_flight_trimmer
aircraft = Cessna172()
atmosphere = ISA1976()
gravity = VerticalConstant()
wind = NoWind()
environment = Environment(atmosphere, gravity, wind)
# Initial conditions.
TAS = 45 # m/s
h0 = 2000 # m
psi0 = 1.0 # rad
x0, y0 = 0, 0 # m
turn_rate = 0.0 # rad/s
gamma0 = -0.1 # rad
system = EulerFlatEarth(lat=0, lon=0, h=h0, psi=psi0, x_earth=x0, y_earth=y0)
not_trimmed_controls = {'delta_elevator': 0.05,
'delta_aileron': 0.01 * np.sign(turn_rate),
'delta_rudder': 0.01 * np.sign(turn_rate),
'delta_t': 0.5}
controls2trim = ['delta_elevator', 'delta_aileron', 'delta_rudder', 'delta_t']
trimmed_ac, trimmed_sys, trimmed_env, results = steady_state_flight_trimmer(
aircraft, system, environment, TAS=TAS, controls_0=not_trimmed_controls,
controls2trim=controls2trim, gamma=gamma0, turn_rate=turn_rate, verbose=1)
#print(results)
print()
print('delta_elevator = ',"%8.4f" % np.rad2deg(results['delta_elevator']), 'deg')
print('delta_aileron = ', "%8.4f" % np.rad2deg(results['delta_aileron']), 'deg')
print('delta_rudder = ', "%8.4f" % np.rad2deg(results['delta_rudder']), 'deg')
print('delta_t = ', "%8.4f" % results['delta_t'], '%')
print()
print('alpha = ', "%8.4f" % np.rad2deg(results['alpha']), 'deg')
print('beta = ', "%8.4f" % np.rad2deg(results['beta']), 'deg')
print()
print('u = ', "%8.4f" % results['u'], 'm/s')
print('v = ', "%8.4f" % results['v'], 'm/s')
print('w = ', "%8.4f" % results['w'], 'm/s')
print()
print('psi = ', "%8.4f" % np.rad2deg(psi0), 'deg')
print('theta = ', "%8.4f" % np.rad2deg(results['theta']), 'deg')
print('phi = ', "%8.4f" % np.rad2deg(results['phi']), 'deg')
print()
print('p =', "%8.4f" % results['p'], 'rad/s')
print('q =', "%8.4f" % results['q'], 'rad/s')
print('r =', "%8.4f" % results['r'], 'rad/s')
print()
my_simulation = BatchSimulation(trimmed_ac, trimmed_sys, trimmed_env)
tfin = 150 # seconds
N = tfin * 100 + 1
time = np.linspace(0, tfin, N)
initial_controls = trimmed_ac.controls
controls = {}
for control_name, control_value in initial_controls.items():
controls[control_name] = np.ones_like(time) * control_value
my_simulation.set_controls(time, controls)
par_list = ['x_earth', 'y_earth', 'height',
'psi', 'theta', 'phi',
'u', 'v', 'w',
'v_north', 'v_east', 'v_down',
'p', 'q', 'r',
'alpha', 'beta', 'TAS',
'F_xb', 'F_yb', 'F_zb',
'M_xb', 'M_yb', 'M_zb']
my_simulation.set_par_dict(par_list)
my_simulation.run_simulation()
# print(my_simulation.par_dict)
plt.style.use('ggplot')
for ii in range(len(par_list) // 3):
three_params = par_list[3 * ii:3 * ii + 3]
fig, ax = plt.subplots(3, 1, sharex=True)
for jj, par in enumerate(three_params):
ax[jj].plot(time, my_simulation.par_dict[par])
ax[jj].set_ylabel(par)
ax[jj].set_xlabel('time (s)')
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'])
ax.plot(my_simulation.par_dict['x_earth'],
my_simulation.par_dict['y_earth'],
my_simulation.par_dict['height'] * 0)
ax.set_xlabel('x_earth')
ax.set_ylabel('y_earth')
ax.set_zlabel('z_earth')
plt.show()
|
bikong2/scikit-learn | refs/heads/master | sklearn/utils/fixes.py | 13 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
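        # Reduce X.data with ufunc.reduceat over each non-empty row/column
        # segment of the compressed sparse matrix; major_index marks those
        # segments.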
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 don't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
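        # Fallback for numpy < 1.8: a full argsort trivially satisfies the
        # argpartition contract (element kth ends up in its sorted position).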
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
if np_version < (1, 8, 1):
def array_equal(a1, a2):
# copy-paste from numpy 1.8.1
try:
a1, a2 = np.asarray(a1), np.asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(np.asarray(a1 == a2).all())
else:
from numpy import array_equal
|
madtypist/madtypist.github.io | refs/heads/master | .bundle/gems/pygments.rb-0.6.0/vendor/simplejson/simplejson/tests/test_bigint_as_string.py | 64 | from unittest import TestCase
import simplejson as json
class TestBigintAsString(TestCase):
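    # IEEE-754 doubles (JavaScript numbers) represent integers exactly only up
    # to 2**53 - 1, so values at or beyond 2**53 are expected to round-trip as
    # strings when bigint_as_string=True.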
values = [(200, 200),
((2 ** 53) - 1, 9007199254740991),
((2 ** 53), '9007199254740992'),
((2 ** 53) + 1, '9007199254740993'),
(-100, -100),
((-2 ** 53), '-9007199254740992'),
((-2 ** 53) - 1, '-9007199254740993'),
((-2 ** 53) + 1, -9007199254740991)]
def test_ints(self):
for val, expect in self.values:
self.assertEquals(
val,
json.loads(json.dumps(val)))
self.assertEquals(
expect,
json.loads(json.dumps(val, bigint_as_string=True)))
def test_lists(self):
for val, expect in self.values:
val = [val, val]
expect = [expect, expect]
self.assertEquals(
val,
json.loads(json.dumps(val)))
self.assertEquals(
expect,
json.loads(json.dumps(val, bigint_as_string=True)))
def test_dicts(self):
for val, expect in self.values:
val = {'k': val}
expect = {'k': expect}
self.assertEquals(
val,
json.loads(json.dumps(val)))
self.assertEquals(
expect,
json.loads(json.dumps(val, bigint_as_string=True)))
def test_dict_keys(self):
for val, _ in self.values:
expect = {str(val): 'value'}
val = {val: 'value'}
self.assertEquals(
expect,
json.loads(json.dumps(val)))
self.assertEquals(
expect,
json.loads(json.dumps(val, bigint_as_string=True)))
|
google/llvm-propeller | refs/heads/bb-clusters | lldb/test/API/commands/expression/import-std-module/unique_ptr/TestUniquePtrFromStdModule.py | 7 | """
Test basic std::unique_ptr functionality.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestUniquePtr(TestBase):
mydir = TestBase.compute_mydir(__file__)
@add_test_categories(["libc++"])
@skipIf(compiler=no_match("clang"))
def test(self):
self.build()
lldbutil.run_to_source_breakpoint(self,
"// Set break point at this line.", lldb.SBFileSpec("main.cpp"))
self.runCmd("settings set target.import-std-module true")
self.expect("expr (int)*s", substrs=['(int) $0 = 3'])
self.expect("expr (int)(*s = 5)", substrs=['(int) $1 = 5'])
self.expect("expr (int)*s", substrs=['(int) $2 = 5'])
self.expect("expr (bool)s", substrs=['(bool) $3 = true'])
self.expect("expr s.reset()")
self.expect("expr (bool)s", substrs=['(bool) $4 = false'])
|
gangadhar-kadam/sms-wnframework | refs/heads/master | core/doctype/workflow_transition/workflow_transition.py | 126 | # Copyright (c) 2012 Web Notes Technologies Pvt Ltd (http://erpnext.com)
#
# MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl |
xiaonanln/myleetcode-python | refs/heads/master | src/132. Palindrome Partitioning II.py | 1 | """
Given a string s, partition s such that every substring of the partition is a palindrome.
Return the minimum cuts needed for a palindrome partitioning of s.
For example, given s = "aab",
Return 1 since the palindrome partitioning ["aa","b"] could be produced using 1 cut.
"""
class Solution(object):
def minCut(self, s):
"""
:type s: str
:rtype: int
"""
print len(s)
def ispalindrome(i, j):
sub = s[i:j]
return sub == sub[::-1]
N = len(s)
dp = [[None]*(N+1) for _ in xrange(N+1)]
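        # dp[i][j] = minimum number of cuts needed to partition s[i:j] into
        # palindromic substrings (0 when s[i:j] is itself a palindrome)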
for L in xrange(0, N+1):
for i in xrange(0, N-L+1):
j = i + L
# print N, L, i, j
if ispalindrome(i, j):
dp[i][j] = 0
continue
mc = float('inf')
for k in xrange(i+1, j):
mc = min(mc, dp[i][k] + dp[k][j] + 1)
dp[i][j] = mc
return dp[0][N]
print Solution().minCut('aab')
import cProfile
cProfile.run("Solution().minCut('apjesgpsxoeiokmqmfgvjslcjukbqxpsobyhjpbgdfruqdkeiszrlmtwgfxyfostpqczidfljwfbbrflkgdvtytbgqalguewnhvvmcgxboycffopmtmhtfizxkmeftcucxpobxmelmjtuzigsxnncxpaibgpuijwhankxbplpyejxmrrjgeoevqozwdtgospohznkoyzocjlracchjqnggbfeebmuvbicbvmpuleywrpzwsihivnrwtxcukwplgtobhgxukwrdlszfaiqxwjvrgxnsveedxseeyeykarqnjrtlaliyudpacctzizcftjlunlgnfwcqqxcqikocqffsjyurzwysfjmswvhbrmshjuzsgpwyubtfbnwajuvrfhlccvfwhxfqthkcwhatktymgxostjlztwdxritygbrbibdgkezvzajizxasjnrcjwzdfvdnwwqeyumkamhzoqhnqjfzwzbixclcxqrtniznemxeahfozp')") |
JulesMichael/DASS | refs/heads/master | DASS_parser/exemple.py | 1 | from __init__ import compileProject
compileProject("exemple_datas")
|
mixman/djangodev | refs/heads/master | tests/regressiontests/admin_changelist/tests.py | 1 | from __future__ import with_statement, absolute_import
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.views.main import ChangeList, SEARCH_VAR, ALL_VAR
from django.contrib.auth.models import User
from django.template import Context, Template
from django.test import TestCase
from django.test.client import RequestFactory
from .admin import (ChildAdmin, QuartetAdmin, BandAdmin, ChordsBandAdmin,
GroupAdmin, ParentAdmin, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, CustomPaginationAdmin,
FilteredChildAdmin, CustomPaginator, site as custom_site,
SwallowAdmin)
from .models import (Child, Parent, Genre, Band, Musician, Group, Quartet,
Membership, ChordsMusician, ChordsBand, Invitation, Swallow)
class ChangeListTests(TestCase):
urls = "regressiontests.admin_changelist.urls"
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create(username=username, is_superuser=True)
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_query_set() shouldn't
overwrite a custom select_related provided by ModelAdmin.queryset().
"""
m = ChildAdmin(Child, admin.site)
request = self.factory.get('/child/')
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
self.assertEqual(cl.query_set.query.select_related, {'parent': {'name': {}}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
row_html = '<tbody><tr class="row1"><th><a href="%d/">name</a></th><td class="nowrap">(None)</td></tr></tbody>' % new_child.id
self.assertFalse(table_output.find(row_html) == -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
row_html = '<tbody><tr class="row1"><th><a href="%d/">name</a></th><td class="nowrap">Parent object</td></tr></tbody>' % new_child.id
self.assertFalse(table_output.find(row_html) == -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertFalse(table_output.find(hiddenfields_div) == -1,
'Failed to find hidden fields in: %s' % table_output)
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
        self.assertFalse(table_output.find('<td>%s</td>' % editable_name_field) == -1,
            'Failed to find "name" list_editable field in: %s' % table_output)
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
new_child = Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda: \
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
new_child = Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't apper more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, admin.site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't appear more than once. Model managed in the
        admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, admin.site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, admin.site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
Regression tests for #15819: If a field listed in list_filter
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.query_set.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
Regression tests for #15819: If a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, admin.site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
# Make sure distinct() was called
self.assertEqual(cl.query_set.count(), 1)
def test_pagination(self):
"""
Regression tests for #12893: Pagination in the admin's changelist doesn't
use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.query_set.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(cl.paginator.page_range, [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all,
m.list_editable, m)
self.assertEqual(cl.query_set.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(cl.paginator.page_range, [1, 2, 3])
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, admin.site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, admin.site)
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 200, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, admin.site)
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, 30, m.list_editable, m)
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
self.assertContains(response, '<a href="%s/">%s</a>' % (i, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(
origin='Africa', load='12.34', speed='22.2')
model_admin = SwallowAdmin(Swallow, admin.site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, unicode(swallow.origin))
self.assertContains(response, unicode(swallow.load))
self.assertContains(response, unicode(swallow.speed))
|
Azure/azure-sdk-for-python | refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline | sdk/redis/azure-mgmt-redis/azure/mgmt/redis/models/_models_py3.py | 1 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, List, Optional, Union
import msrest.serialization
from ._redis_management_client_enums import *
class CheckNameAvailabilityParameters(msrest.serialization.Model):
"""Parameters body to pass for resource name availability check.
All required parameters must be populated in order to send to Azure.
:param name: Required. Resource name.
:type name: str
:param type: Required. Resource type. The only legal value of this property for checking redis
cache name availability is 'Microsoft.Cache/redis'.
:type type: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
type: str,
**kwargs
):
super(CheckNameAvailabilityParameters, self).__init__(**kwargs)
self.name = name
self.type = type
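# Illustrative sketch (not part of the generated code): building the request body for a
# name-availability check using only the constructor above. 'myCache' is a placeholder;
# per the docstring, 'Microsoft.Cache/redis' is the only legal resource type here.
#
#   params = CheckNameAvailabilityParameters(name='myCache', type='Microsoft.Cache/redis')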
class ExportRDBParameters(msrest.serialization.Model):
"""Parameters for Redis export operation.
All required parameters must be populated in order to send to Azure.
:param format: File format.
:type format: str
:param prefix: Required. Prefix to use for exported files.
:type prefix: str
:param container: Required. Container name to export to.
:type container: str
"""
_validation = {
'prefix': {'required': True},
'container': {'required': True},
}
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
'prefix': {'key': 'prefix', 'type': 'str'},
'container': {'key': 'container', 'type': 'str'},
}
def __init__(
self,
*,
prefix: str,
container: str,
format: Optional[str] = None,
**kwargs
):
super(ExportRDBParameters, self).__init__(**kwargs)
self.format = format
self.prefix = prefix
self.container = container
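# Illustrative sketch (not part of the generated code): an export payload built from the
# constructor above. The prefix, container name and 'RDB' format value are placeholder
# assumptions, not values confirmed by this module.
#
#   export_params = ExportRDBParameters(prefix='datadump1', container='exportcontainer', format='RDB')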
class ImportRDBParameters(msrest.serialization.Model):
"""Parameters for Redis import operation.
All required parameters must be populated in order to send to Azure.
:param format: File format.
:type format: str
:param files: Required. files to import.
:type files: list[str]
"""
_validation = {
'files': {'required': True},
}
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
'files': {'key': 'files', 'type': '[str]'},
}
def __init__(
self,
*,
files: List[str],
format: Optional[str] = None,
**kwargs
):
super(ImportRDBParameters, self).__init__(**kwargs)
self.format = format
self.files = files
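# Illustrative sketch (not part of the generated code): an import payload; 'files' takes a
# list of strings. The blob URI shown and the 'RDB' format value are placeholder assumptions.
#
#   import_params = ImportRDBParameters(
#       files=['https://mystorage.blob.core.windows.net/container/datadump1'], format='RDB')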
class NotificationListResponse(msrest.serialization.Model):
"""The response of listUpgradeNotifications.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: List of all notifications.
:type value: list[~azure.mgmt.redis.models.UpgradeNotification]
:ivar next_link: Link for next set of notifications.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[UpgradeNotification]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["UpgradeNotification"]] = None,
**kwargs
):
super(NotificationListResponse, self).__init__(**kwargs)
self.value = value
self.next_link = None
class Operation(msrest.serialization.Model):
"""REST API operation.
:param name: Operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: The object that describes the operation.
:type display: ~azure.mgmt.redis.models.OperationDisplay
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["OperationDisplay"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
class OperationDisplay(msrest.serialization.Model):
"""The object that describes the operation.
:param provider: Friendly name of the resource provider.
:type provider: str
:param operation: Operation type: read, write, delete, listKeys/action, etc.
:type operation: str
:param resource: Resource type on which the operation is performed.
:type resource: str
:param description: Friendly name of the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
operation: Optional[str] = None,
resource: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.operation = operation
self.resource = resource
self.description = description
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list REST API operations. It contains a list of operations and a URL nextLink to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: List of operations supported by the resource provider.
:type value: list[~azure.mgmt.redis.models.Operation]
:ivar next_link: URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Operation"]] = None,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class Resource(msrest.serialization.Model):
"""The Resource definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
"""The resource model definition for a ARM proxy resource. It will have everything other than required location and tags.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class RedisAccessKeys(msrest.serialization.Model):
"""Redis cache access keys.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar primary_key: The current primary key that clients can use to authenticate with Redis
cache.
:vartype primary_key: str
:ivar secondary_key: The current secondary key that clients can use to authenticate with Redis
cache.
:vartype secondary_key: str
"""
_validation = {
'primary_key': {'readonly': True},
'secondary_key': {'readonly': True},
}
_attribute_map = {
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RedisAccessKeys, self).__init__(**kwargs)
self.primary_key = None
self.secondary_key = None
class RedisCommonProperties(msrest.serialization.Model):
"""Create/Update/Get common properties of the redis cache.
:param redis_configuration: All Redis Settings. Some possible keys: rdb-backup-enabled,rdb-
storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-
keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-
entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-
entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
:type redis_configuration: dict[str, str]
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
:type enable_non_ssl_port: bool
:param replicas_per_master: The number of replicas to be created per master.
:type replicas_per_master: int
:param tenant_settings: A dictionary of tenant settings.
:type tenant_settings: dict[str, str]
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:type shard_count: int
:param minimum_tls_version: Optional: requires clients to use a specified TLS version (or
higher) to connect (e.g. '1.0', '1.1', '1.2'). Possible values include: "1.0", "1.1", "1.2".
:type minimum_tls_version: str or ~azure.mgmt.redis.models.TlsVersion
"""
_attribute_map = {
'redis_configuration': {'key': 'redisConfiguration', 'type': '{str}'},
'enable_non_ssl_port': {'key': 'enableNonSslPort', 'type': 'bool'},
'replicas_per_master': {'key': 'replicasPerMaster', 'type': 'int'},
'tenant_settings': {'key': 'tenantSettings', 'type': '{str}'},
'shard_count': {'key': 'shardCount', 'type': 'int'},
'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'str'},
}
def __init__(
self,
*,
redis_configuration: Optional[Dict[str, str]] = None,
enable_non_ssl_port: Optional[bool] = None,
replicas_per_master: Optional[int] = None,
tenant_settings: Optional[Dict[str, str]] = None,
shard_count: Optional[int] = None,
minimum_tls_version: Optional[Union[str, "TlsVersion"]] = None,
**kwargs
):
super(RedisCommonProperties, self).__init__(**kwargs)
self.redis_configuration = redis_configuration
self.enable_non_ssl_port = enable_non_ssl_port
self.replicas_per_master = replicas_per_master
self.tenant_settings = tenant_settings
self.shard_count = shard_count
self.minimum_tls_version = minimum_tls_version
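# Illustrative sketch (not part of the generated code): the redis_configuration dictionary
# maps setting names listed in the docstring above to string values. The specific values
# below ('allkeys-lru', '50') are placeholders, not values confirmed by this module.
#
#   common = RedisCommonProperties(
#       redis_configuration={'maxmemory-policy': 'allkeys-lru', 'maxmemory-delta': '50'},
#       enable_non_ssl_port=False,
#       minimum_tls_version='1.2')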
class RedisCreateParameters(msrest.serialization.Model):
"""Parameters supplied to the Create Redis operation.
All required parameters must be populated in order to send to Azure.
:param zones: A list of availability zones denoting where the resource needs to come from.
:type zones: list[str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param redis_configuration: All Redis Settings. Some possible keys: rdb-backup-enabled,rdb-
storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-
keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-
entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-
entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
:type redis_configuration: dict[str, str]
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
:type enable_non_ssl_port: bool
:param replicas_per_master: The number of replicas to be created per master.
:type replicas_per_master: int
:param tenant_settings: A dictionary of tenant settings.
:type tenant_settings: dict[str, str]
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:type shard_count: int
:param minimum_tls_version: Optional: requires clients to use a specified TLS version (or
higher) to connect (e.g. '1.0', '1.1', '1.2'). Possible values include: "1.0", "1.1", "1.2".
:type minimum_tls_version: str or ~azure.mgmt.redis.models.TlsVersion
:param sku: Required. The SKU of the Redis cache to deploy.
:type sku: ~azure.mgmt.redis.models.Sku
:param subnet_id: The full resource ID of a subnet in a virtual network to deploy the Redis
cache in. Example format:
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1.
:type subnet_id: str
:param static_ip: Static IP address. Required when deploying a Redis cache inside an existing
Azure Virtual Network.
:type static_ip: str
"""
_validation = {
'location': {'required': True},
'sku': {'required': True},
'subnet_id': {'pattern': r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.(ClassicNetwork|Network)/virtualNetworks/[^/]*/subnets/[^/]*$'},
'static_ip': {'pattern': r'^\d+\.\d+\.\d+\.\d+$'},
}
_attribute_map = {
'zones': {'key': 'zones', 'type': '[str]'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'redis_configuration': {'key': 'properties.redisConfiguration', 'type': '{str}'},
'enable_non_ssl_port': {'key': 'properties.enableNonSslPort', 'type': 'bool'},
'replicas_per_master': {'key': 'properties.replicasPerMaster', 'type': 'int'},
'tenant_settings': {'key': 'properties.tenantSettings', 'type': '{str}'},
'shard_count': {'key': 'properties.shardCount', 'type': 'int'},
'minimum_tls_version': {'key': 'properties.minimumTlsVersion', 'type': 'str'},
'sku': {'key': 'properties.sku', 'type': 'Sku'},
'subnet_id': {'key': 'properties.subnetId', 'type': 'str'},
'static_ip': {'key': 'properties.staticIP', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
sku: "Sku",
zones: Optional[List[str]] = None,
tags: Optional[Dict[str, str]] = None,
redis_configuration: Optional[Dict[str, str]] = None,
enable_non_ssl_port: Optional[bool] = None,
replicas_per_master: Optional[int] = None,
tenant_settings: Optional[Dict[str, str]] = None,
shard_count: Optional[int] = None,
minimum_tls_version: Optional[Union[str, "TlsVersion"]] = None,
subnet_id: Optional[str] = None,
static_ip: Optional[str] = None,
**kwargs
):
super(RedisCreateParameters, self).__init__(**kwargs)
self.zones = zones
self.location = location
self.tags = tags
self.redis_configuration = redis_configuration
self.enable_non_ssl_port = enable_non_ssl_port
self.replicas_per_master = replicas_per_master
self.tenant_settings = tenant_settings
self.shard_count = shard_count
self.minimum_tls_version = minimum_tls_version
self.sku = sku
self.subnet_id = subnet_id
self.static_ip = static_ip
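# Illustrative sketch (not part of the generated code): minimal create parameters. Sku is
# defined elsewhere in this package; the field names used for it here (name/family/capacity)
# are assumptions, as are the location and SKU values.
#
#   create_params = RedisCreateParameters(
#       location='West US',
#       sku=Sku(name='Premium', family='P', capacity=1),
#       enable_non_ssl_port=False,
#       minimum_tls_version='1.2')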
class RedisCreateProperties(RedisCommonProperties):
"""Properties supplied to Create Redis operation.
All required parameters must be populated in order to send to Azure.
:param redis_configuration: All Redis Settings. Some possible keys: rdb-backup-enabled,rdb-
storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-
keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-
entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-
entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
:type redis_configuration: dict[str, str]
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
:type enable_non_ssl_port: bool
:param replicas_per_master: The number of replicas to be created per master.
:type replicas_per_master: int
:param tenant_settings: A dictionary of tenant settings.
:type tenant_settings: dict[str, str]
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:type shard_count: int
:param minimum_tls_version: Optional: requires clients to use a specified TLS version (or
higher) to connect (e.g. '1.0', '1.1', '1.2'). Possible values include: "1.0", "1.1", "1.2".
:type minimum_tls_version: str or ~azure.mgmt.redis.models.TlsVersion
:param sku: Required. The SKU of the Redis cache to deploy.
:type sku: ~azure.mgmt.redis.models.Sku
:param subnet_id: The full resource ID of a subnet in a virtual network to deploy the Redis
cache in. Example format:
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1.
:type subnet_id: str
:param static_ip: Static IP address. Required when deploying a Redis cache inside an existing
Azure Virtual Network.
:type static_ip: str
"""
_validation = {
'sku': {'required': True},
'subnet_id': {'pattern': r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.(ClassicNetwork|Network)/virtualNetworks/[^/]*/subnets/[^/]*$'},
'static_ip': {'pattern': r'^\d+\.\d+\.\d+\.\d+$'},
}
_attribute_map = {
'redis_configuration': {'key': 'redisConfiguration', 'type': '{str}'},
'enable_non_ssl_port': {'key': 'enableNonSslPort', 'type': 'bool'},
'replicas_per_master': {'key': 'replicasPerMaster', 'type': 'int'},
'tenant_settings': {'key': 'tenantSettings', 'type': '{str}'},
'shard_count': {'key': 'shardCount', 'type': 'int'},
'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'subnet_id': {'key': 'subnetId', 'type': 'str'},
'static_ip': {'key': 'staticIP', 'type': 'str'},
}
def __init__(
self,
*,
sku: "Sku",
redis_configuration: Optional[Dict[str, str]] = None,
enable_non_ssl_port: Optional[bool] = None,
replicas_per_master: Optional[int] = None,
tenant_settings: Optional[Dict[str, str]] = None,
shard_count: Optional[int] = None,
minimum_tls_version: Optional[Union[str, "TlsVersion"]] = None,
subnet_id: Optional[str] = None,
static_ip: Optional[str] = None,
**kwargs
):
super(RedisCreateProperties, self).__init__(redis_configuration=redis_configuration, enable_non_ssl_port=enable_non_ssl_port, replicas_per_master=replicas_per_master, tenant_settings=tenant_settings, shard_count=shard_count, minimum_tls_version=minimum_tls_version, **kwargs)
self.sku = sku
self.subnet_id = subnet_id
self.static_ip = static_ip
class RedisFirewallRule(Resource):
"""A firewall rule on a redis cache has a name, and describes a contiguous range of IP addresses permitted to connect.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param start_ip: Required. lowest IP address included in the range.
:type start_ip: str
:param end_ip: Required. highest IP address included in the range.
:type end_ip: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'start_ip': {'required': True},
'end_ip': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'start_ip': {'key': 'properties.startIP', 'type': 'str'},
'end_ip': {'key': 'properties.endIP', 'type': 'str'},
}
def __init__(
self,
*,
start_ip: str,
end_ip: str,
**kwargs
):
super(RedisFirewallRule, self).__init__(**kwargs)
self.start_ip = start_ip
self.end_ip = end_ip
class RedisFirewallRuleCreateParameters(msrest.serialization.Model):
"""Parameters required for creating a firewall rule on redis cache.
All required parameters must be populated in order to send to Azure.
:param start_ip: Required. lowest IP address included in the range.
:type start_ip: str
:param end_ip: Required. highest IP address included in the range.
:type end_ip: str
"""
_validation = {
'start_ip': {'required': True},
'end_ip': {'required': True},
}
_attribute_map = {
'start_ip': {'key': 'properties.startIP', 'type': 'str'},
'end_ip': {'key': 'properties.endIP', 'type': 'str'},
}
def __init__(
self,
*,
start_ip: str,
end_ip: str,
**kwargs
):
super(RedisFirewallRuleCreateParameters, self).__init__(**kwargs)
self.start_ip = start_ip
self.end_ip = end_ip
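# Illustrative sketch (not part of the generated code): a firewall rule covering a
# contiguous IPv4 range; the addresses are placeholders.
#
#   rule_params = RedisFirewallRuleCreateParameters(start_ip='10.0.1.1', end_ip='10.0.1.4')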
class RedisFirewallRuleListResult(msrest.serialization.Model):
"""The response of list firewall rules Redis operation.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: Results of the list firewall rules operation.
:type value: list[~azure.mgmt.redis.models.RedisFirewallRule]
:ivar next_link: Link for next page of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RedisFirewallRule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RedisFirewallRule"]] = None,
**kwargs
):
super(RedisFirewallRuleListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class RedisForceRebootResponse(msrest.serialization.Model):
"""Response to force reboot for Redis cache.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar message: Status message.
:vartype message: str
"""
_validation = {
'message': {'readonly': True},
}
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RedisForceRebootResponse, self).__init__(**kwargs)
self.message = None
class RedisInstanceDetails(msrest.serialization.Model):
"""Details of single instance of redis.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar ssl_port: Redis instance SSL port.
:vartype ssl_port: int
:ivar non_ssl_port: If enableNonSslPort is true, provides Redis instance Non-SSL port.
:vartype non_ssl_port: int
:ivar zone: If the Cache uses availability zones, specifies availability zone where this
instance is located.
:vartype zone: str
:ivar shard_id: If clustering is enabled, the Shard ID of Redis Instance.
:vartype shard_id: int
:ivar is_master: Specifies whether the instance is a master node.
:vartype is_master: bool
"""
_validation = {
'ssl_port': {'readonly': True},
'non_ssl_port': {'readonly': True},
'zone': {'readonly': True},
'shard_id': {'readonly': True},
'is_master': {'readonly': True},
}
_attribute_map = {
'ssl_port': {'key': 'sslPort', 'type': 'int'},
'non_ssl_port': {'key': 'nonSslPort', 'type': 'int'},
'zone': {'key': 'zone', 'type': 'str'},
'shard_id': {'key': 'shardId', 'type': 'int'},
'is_master': {'key': 'isMaster', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(RedisInstanceDetails, self).__init__(**kwargs)
self.ssl_port = None
self.non_ssl_port = None
self.zone = None
self.shard_id = None
self.is_master = None
class RedisLinkedServer(msrest.serialization.Model):
"""Linked server Id.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Linked server Id.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RedisLinkedServer, self).__init__(**kwargs)
self.id = None
class RedisLinkedServerCreateParameters(msrest.serialization.Model):
"""Parameter required for creating a linked server to redis cache.
All required parameters must be populated in order to send to Azure.
:param linked_redis_cache_id: Required. Fully qualified resourceId of the linked redis cache.
:type linked_redis_cache_id: str
:param linked_redis_cache_location: Required. Location of the linked redis cache.
:type linked_redis_cache_location: str
:param server_role: Required. Role of the linked server. Possible values include: "Primary",
"Secondary".
:type server_role: str or ~azure.mgmt.redis.models.ReplicationRole
"""
_validation = {
'linked_redis_cache_id': {'required': True},
'linked_redis_cache_location': {'required': True},
'server_role': {'required': True},
}
_attribute_map = {
'linked_redis_cache_id': {'key': 'properties.linkedRedisCacheId', 'type': 'str'},
'linked_redis_cache_location': {'key': 'properties.linkedRedisCacheLocation', 'type': 'str'},
'server_role': {'key': 'properties.serverRole', 'type': 'str'},
}
def __init__(
self,
*,
linked_redis_cache_id: str,
linked_redis_cache_location: str,
server_role: Union[str, "ReplicationRole"],
**kwargs
):
super(RedisLinkedServerCreateParameters, self).__init__(**kwargs)
self.linked_redis_cache_id = linked_redis_cache_id
self.linked_redis_cache_location = linked_redis_cache_location
self.server_role = server_role
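# Illustrative sketch (not part of the generated code): linking another cache for
# geo-replication. The resource ID and location are placeholders; 'Secondary' is one of the
# ReplicationRole values documented above.
#
#   link_params = RedisLinkedServerCreateParameters(
#       linked_redis_cache_id='/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Cache/Redis/<cache>',
#       linked_redis_cache_location='East US',
#       server_role='Secondary')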
class RedisLinkedServerCreateProperties(msrest.serialization.Model):
"""Create properties for a linked server.
All required parameters must be populated in order to send to Azure.
:param linked_redis_cache_id: Required. Fully qualified resourceId of the linked redis cache.
:type linked_redis_cache_id: str
:param linked_redis_cache_location: Required. Location of the linked redis cache.
:type linked_redis_cache_location: str
:param server_role: Required. Role of the linked server. Possible values include: "Primary",
"Secondary".
:type server_role: str or ~azure.mgmt.redis.models.ReplicationRole
"""
_validation = {
'linked_redis_cache_id': {'required': True},
'linked_redis_cache_location': {'required': True},
'server_role': {'required': True},
}
_attribute_map = {
'linked_redis_cache_id': {'key': 'linkedRedisCacheId', 'type': 'str'},
'linked_redis_cache_location': {'key': 'linkedRedisCacheLocation', 'type': 'str'},
'server_role': {'key': 'serverRole', 'type': 'str'},
}
def __init__(
self,
*,
linked_redis_cache_id: str,
linked_redis_cache_location: str,
server_role: Union[str, "ReplicationRole"],
**kwargs
):
super(RedisLinkedServerCreateProperties, self).__init__(**kwargs)
self.linked_redis_cache_id = linked_redis_cache_id
self.linked_redis_cache_location = linked_redis_cache_location
self.server_role = server_role
class RedisLinkedServerProperties(RedisLinkedServerCreateProperties):
"""Properties of a linked server to be returned in get/put response.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param linked_redis_cache_id: Required. Fully qualified resourceId of the linked redis cache.
:type linked_redis_cache_id: str
:param linked_redis_cache_location: Required. Location of the linked redis cache.
:type linked_redis_cache_location: str
:param server_role: Required. Role of the linked server. Possible values include: "Primary",
"Secondary".
:type server_role: str or ~azure.mgmt.redis.models.ReplicationRole
:ivar provisioning_state: Terminal state of the link between primary and secondary redis cache.
:vartype provisioning_state: str
"""
_validation = {
'linked_redis_cache_id': {'required': True},
'linked_redis_cache_location': {'required': True},
'server_role': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'linked_redis_cache_id': {'key': 'linkedRedisCacheId', 'type': 'str'},
'linked_redis_cache_location': {'key': 'linkedRedisCacheLocation', 'type': 'str'},
'server_role': {'key': 'serverRole', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
linked_redis_cache_id: str,
linked_redis_cache_location: str,
server_role: Union[str, "ReplicationRole"],
**kwargs
):
super(RedisLinkedServerProperties, self).__init__(linked_redis_cache_id=linked_redis_cache_id, linked_redis_cache_location=linked_redis_cache_location, server_role=server_role, **kwargs)
self.provisioning_state = None
class RedisLinkedServerWithProperties(Resource):
"""Response to put/get linked server (with properties) for Redis cache.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param linked_redis_cache_id: Fully qualified resourceId of the linked redis cache.
:type linked_redis_cache_id: str
:param linked_redis_cache_location: Location of the linked redis cache.
:type linked_redis_cache_location: str
:param server_role: Role of the linked server. Possible values include: "Primary", "Secondary".
:type server_role: str or ~azure.mgmt.redis.models.ReplicationRole
:ivar provisioning_state: Terminal state of the link between primary and secondary redis cache.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'linked_redis_cache_id': {'key': 'properties.linkedRedisCacheId', 'type': 'str'},
'linked_redis_cache_location': {'key': 'properties.linkedRedisCacheLocation', 'type': 'str'},
'server_role': {'key': 'properties.serverRole', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
linked_redis_cache_id: Optional[str] = None,
linked_redis_cache_location: Optional[str] = None,
server_role: Optional[Union[str, "ReplicationRole"]] = None,
**kwargs
):
super(RedisLinkedServerWithProperties, self).__init__(**kwargs)
self.linked_redis_cache_id = linked_redis_cache_id
self.linked_redis_cache_location = linked_redis_cache_location
self.server_role = server_role
self.provisioning_state = None
class RedisLinkedServerWithPropertiesList(msrest.serialization.Model):
"""List of linked servers (with properties) of a Redis cache.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: List of linked servers (with properties) of a Redis cache.
:type value: list[~azure.mgmt.redis.models.RedisLinkedServerWithProperties]
:ivar next_link: Link for next set.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RedisLinkedServerWithProperties]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RedisLinkedServerWithProperties"]] = None,
**kwargs
):
super(RedisLinkedServerWithPropertiesList, self).__init__(**kwargs)
self.value = value
self.next_link = None
class RedisListResult(msrest.serialization.Model):
"""The response of list Redis operation.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: List of Redis cache instances.
:type value: list[~azure.mgmt.redis.models.RedisResource]
:ivar next_link: Link for next page of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RedisResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RedisResource"]] = None,
**kwargs
):
super(RedisListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class RedisPatchSchedule(Resource):
"""Response to put/get patch schedules for Redis cache.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param schedule_entries: Required. List of patch schedules for a Redis cache.
:type schedule_entries: list[~azure.mgmt.redis.models.ScheduleEntry]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'schedule_entries': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'schedule_entries': {'key': 'properties.scheduleEntries', 'type': '[ScheduleEntry]'},
}
def __init__(
self,
*,
schedule_entries: List["ScheduleEntry"],
**kwargs
):
super(RedisPatchSchedule, self).__init__(**kwargs)
self.schedule_entries = schedule_entries
class RedisPatchScheduleListResult(msrest.serialization.Model):
"""The response of list patch schedules Redis operation.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: Results of the list patch schedules operation.
:type value: list[~azure.mgmt.redis.models.RedisPatchSchedule]
:ivar next_link: Link for next page of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RedisPatchSchedule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RedisPatchSchedule"]] = None,
**kwargs
):
super(RedisPatchScheduleListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class RedisProperties(RedisCreateProperties):
"""Properties of the redis cache.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param redis_configuration: All Redis Settings. Some possible keys: rdb-backup-enabled,rdb-
storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-
keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-
entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-
entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
:type redis_configuration: dict[str, str]
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
:type enable_non_ssl_port: bool
:param replicas_per_master: The number of replicas to be created per master.
:type replicas_per_master: int
:param tenant_settings: A dictionary of tenant settings.
:type tenant_settings: dict[str, str]
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:type shard_count: int
:param minimum_tls_version: Optional: requires clients to use a specified TLS version (or
higher) to connect (e.g. '1.0', '1.1', '1.2'). Possible values include: "1.0", "1.1", "1.2".
:type minimum_tls_version: str or ~azure.mgmt.redis.models.TlsVersion
:param sku: Required. The SKU of the Redis cache to deploy.
:type sku: ~azure.mgmt.redis.models.Sku
:param subnet_id: The full resource ID of a subnet in a virtual network to deploy the Redis
cache in. Example format:
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1.
:type subnet_id: str
:param static_ip: Static IP address. Required when deploying a Redis cache inside an existing
Azure Virtual Network.
:type static_ip: str
:ivar redis_version: Redis version.
:vartype redis_version: str
:ivar provisioning_state: Redis instance provisioning status. Possible values include:
"Creating", "Deleting", "Disabled", "Failed", "Linking", "Provisioning",
"RecoveringScaleFailure", "Scaling", "Succeeded", "Unlinking", "Unprovisioning", "Updating".
:vartype provisioning_state: str or ~azure.mgmt.redis.models.ProvisioningState
:ivar host_name: Redis host name.
:vartype host_name: str
:ivar port: Redis non-SSL port.
:vartype port: int
:ivar ssl_port: Redis SSL port.
:vartype ssl_port: int
:ivar access_keys: The keys of the Redis cache - not set if this object is not the response to
Create or Update redis cache.
:vartype access_keys: ~azure.mgmt.redis.models.RedisAccessKeys
:ivar linked_servers: List of the linked servers associated with the cache.
:vartype linked_servers: list[~azure.mgmt.redis.models.RedisLinkedServer]
:ivar instances: List of the Redis instances associated with the cache.
:vartype instances: list[~azure.mgmt.redis.models.RedisInstanceDetails]
"""
_validation = {
'sku': {'required': True},
'subnet_id': {'pattern': r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.(ClassicNetwork|Network)/virtualNetworks/[^/]*/subnets/[^/]*$'},
'static_ip': {'pattern': r'^\d+\.\d+\.\d+\.\d+$'},
'redis_version': {'readonly': True},
'provisioning_state': {'readonly': True},
'host_name': {'readonly': True},
'port': {'readonly': True},
'ssl_port': {'readonly': True},
'access_keys': {'readonly': True},
'linked_servers': {'readonly': True},
'instances': {'readonly': True},
}
_attribute_map = {
'redis_configuration': {'key': 'redisConfiguration', 'type': '{str}'},
'enable_non_ssl_port': {'key': 'enableNonSslPort', 'type': 'bool'},
'replicas_per_master': {'key': 'replicasPerMaster', 'type': 'int'},
'tenant_settings': {'key': 'tenantSettings', 'type': '{str}'},
'shard_count': {'key': 'shardCount', 'type': 'int'},
'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'subnet_id': {'key': 'subnetId', 'type': 'str'},
'static_ip': {'key': 'staticIP', 'type': 'str'},
'redis_version': {'key': 'redisVersion', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'host_name': {'key': 'hostName', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'ssl_port': {'key': 'sslPort', 'type': 'int'},
'access_keys': {'key': 'accessKeys', 'type': 'RedisAccessKeys'},
'linked_servers': {'key': 'linkedServers', 'type': '[RedisLinkedServer]'},
'instances': {'key': 'instances', 'type': '[RedisInstanceDetails]'},
}
def __init__(
self,
*,
sku: "Sku",
redis_configuration: Optional[Dict[str, str]] = None,
enable_non_ssl_port: Optional[bool] = None,
replicas_per_master: Optional[int] = None,
tenant_settings: Optional[Dict[str, str]] = None,
shard_count: Optional[int] = None,
minimum_tls_version: Optional[Union[str, "TlsVersion"]] = None,
subnet_id: Optional[str] = None,
static_ip: Optional[str] = None,
**kwargs
):
super(RedisProperties, self).__init__(redis_configuration=redis_configuration, enable_non_ssl_port=enable_non_ssl_port, replicas_per_master=replicas_per_master, tenant_settings=tenant_settings, shard_count=shard_count, minimum_tls_version=minimum_tls_version, sku=sku, subnet_id=subnet_id, static_ip=static_ip, **kwargs)
self.redis_version = None
self.provisioning_state = None
self.host_name = None
self.port = None
self.ssl_port = None
self.access_keys = None
self.linked_servers = None
self.instances = None
class RedisRebootParameters(msrest.serialization.Model):
"""Specifies which Redis node(s) to reboot.
:param reboot_type: Which Redis node(s) to reboot. Depending on this value data loss is
possible. Possible values include: "PrimaryNode", "SecondaryNode", "AllNodes".
:type reboot_type: str or ~azure.mgmt.redis.models.RebootType
:param shard_id: If clustering is enabled, the ID of the shard to be rebooted.
:type shard_id: int
:param ports: A list of redis instances to reboot, specified by per-instance SSL ports or non-
SSL ports.
:type ports: list[int]
"""
_attribute_map = {
'reboot_type': {'key': 'rebootType', 'type': 'str'},
'shard_id': {'key': 'shardId', 'type': 'int'},
'ports': {'key': 'ports', 'type': '[int]'},
}
def __init__(
self,
*,
reboot_type: Optional[Union[str, "RebootType"]] = None,
shard_id: Optional[int] = None,
ports: Optional[List[int]] = None,
**kwargs
):
super(RedisRebootParameters, self).__init__(**kwargs)
self.reboot_type = reboot_type
self.shard_id = shard_id
self.ports = ports
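# Illustrative sketch (not part of the generated code): reboot only the primary node of a
# single shard. 'PrimaryNode' is one of the RebootType values documented above; the shard
# id is a placeholder.
#
#   reboot_params = RedisRebootParameters(reboot_type='PrimaryNode', shard_id=0)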
class RedisRegenerateKeyParameters(msrest.serialization.Model):
"""Specifies which Redis access keys to reset.
All required parameters must be populated in order to send to Azure.
:param key_type: Required. The Redis access key to regenerate. Possible values include:
"Primary", "Secondary".
:type key_type: str or ~azure.mgmt.redis.models.RedisKeyType
"""
_validation = {
'key_type': {'required': True},
}
_attribute_map = {
'key_type': {'key': 'keyType', 'type': 'str'},
}
def __init__(
self,
*,
key_type: Union[str, "RedisKeyType"],
**kwargs
):
super(RedisRegenerateKeyParameters, self).__init__(**kwargs)
self.key_type = key_type
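# Illustrative sketch (not part of the generated code): request regeneration of the primary
# access key. 'Primary' is one of the RedisKeyType values documented above.
#
#   regenerate_params = RedisRegenerateKeyParameters(key_type='Primary')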
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = tags
self.location = location
class RedisResource(TrackedResource):
"""A single Redis item in List or Get Operation.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param zones: A list of availability zones denoting where the resource needs to come from.
:type zones: list[str]
:param redis_configuration: All Redis Settings. Some possible keys: rdb-backup-enabled,rdb-
storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-
keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-
entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-
entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
:type redis_configuration: dict[str, str]
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
:type enable_non_ssl_port: bool
:param replicas_per_master: The number of replicas to be created per master.
:type replicas_per_master: int
:param tenant_settings: A dictionary of tenant settings.
:type tenant_settings: dict[str, str]
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:type shard_count: int
:param minimum_tls_version: Optional: requires clients to use a specified TLS version (or
higher) to connect (e.g. '1.0', '1.1', '1.2'). Possible values include: "1.0", "1.1", "1.2".
:type minimum_tls_version: str or ~azure.mgmt.redis.models.TlsVersion
:param sku: Required. The SKU of the Redis cache to deploy.
:type sku: ~azure.mgmt.redis.models.Sku
:param subnet_id: The full resource ID of a subnet in a virtual network to deploy the Redis
cache in. Example format:
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1.
:type subnet_id: str
:param static_ip: Static IP address. Required when deploying a Redis cache inside an existing
Azure Virtual Network.
:type static_ip: str
:ivar redis_version: Redis version.
:vartype redis_version: str
:ivar provisioning_state: Redis instance provisioning status. Possible values include:
"Creating", "Deleting", "Disabled", "Failed", "Linking", "Provisioning",
"RecoveringScaleFailure", "Scaling", "Succeeded", "Unlinking", "Unprovisioning", "Updating".
:vartype provisioning_state: str or ~azure.mgmt.redis.models.ProvisioningState
:ivar host_name: Redis host name.
:vartype host_name: str
:ivar port: Redis non-SSL port.
:vartype port: int
:ivar ssl_port: Redis SSL port.
:vartype ssl_port: int
:ivar access_keys: The keys of the Redis cache - not set if this object is not the response to
Create or Update redis cache.
:vartype access_keys: ~azure.mgmt.redis.models.RedisAccessKeys
:ivar linked_servers: List of the linked servers associated with the cache.
:vartype linked_servers: list[~azure.mgmt.redis.models.RedisLinkedServer]
:ivar instances: List of the Redis instances associated with the cache.
:vartype instances: list[~azure.mgmt.redis.models.RedisInstanceDetails]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'sku': {'required': True},
'subnet_id': {'pattern': r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.(ClassicNetwork|Network)/virtualNetworks/[^/]*/subnets/[^/]*$'},
'static_ip': {'pattern': r'^\d+\.\d+\.\d+\.\d+$'},
'redis_version': {'readonly': True},
'provisioning_state': {'readonly': True},
'host_name': {'readonly': True},
'port': {'readonly': True},
'ssl_port': {'readonly': True},
'access_keys': {'readonly': True},
'linked_servers': {'readonly': True},
'instances': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
'redis_configuration': {'key': 'properties.redisConfiguration', 'type': '{str}'},
'enable_non_ssl_port': {'key': 'properties.enableNonSslPort', 'type': 'bool'},
'replicas_per_master': {'key': 'properties.replicasPerMaster', 'type': 'int'},
'tenant_settings': {'key': 'properties.tenantSettings', 'type': '{str}'},
'shard_count': {'key': 'properties.shardCount', 'type': 'int'},
'minimum_tls_version': {'key': 'properties.minimumTlsVersion', 'type': 'str'},
'sku': {'key': 'properties.sku', 'type': 'Sku'},
'subnet_id': {'key': 'properties.subnetId', 'type': 'str'},
'static_ip': {'key': 'properties.staticIP', 'type': 'str'},
'redis_version': {'key': 'properties.redisVersion', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'port': {'key': 'properties.port', 'type': 'int'},
'ssl_port': {'key': 'properties.sslPort', 'type': 'int'},
'access_keys': {'key': 'properties.accessKeys', 'type': 'RedisAccessKeys'},
'linked_servers': {'key': 'properties.linkedServers', 'type': '[RedisLinkedServer]'},
'instances': {'key': 'properties.instances', 'type': '[RedisInstanceDetails]'},
}
def __init__(
self,
*,
location: str,
sku: "Sku",
tags: Optional[Dict[str, str]] = None,
zones: Optional[List[str]] = None,
redis_configuration: Optional[Dict[str, str]] = None,
enable_non_ssl_port: Optional[bool] = None,
replicas_per_master: Optional[int] = None,
tenant_settings: Optional[Dict[str, str]] = None,
shard_count: Optional[int] = None,
minimum_tls_version: Optional[Union[str, "TlsVersion"]] = None,
subnet_id: Optional[str] = None,
static_ip: Optional[str] = None,
**kwargs
):
super(RedisResource, self).__init__(tags=tags, location=location, **kwargs)
self.zones = zones
self.redis_configuration = redis_configuration
self.enable_non_ssl_port = enable_non_ssl_port
self.replicas_per_master = replicas_per_master
self.tenant_settings = tenant_settings
self.shard_count = shard_count
self.minimum_tls_version = minimum_tls_version
self.sku = sku
self.subnet_id = subnet_id
self.static_ip = static_ip
self.redis_version = None
self.provisioning_state = None
self.host_name = None
self.port = None
self.ssl_port = None
self.access_keys = None
self.linked_servers = None
self.instances = None
class RedisUpdateParameters(msrest.serialization.Model):
"""Parameters supplied to the Update Redis operation.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param redis_configuration: All Redis Settings. Some possible keys: rdb-backup-enabled,rdb-
storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-
keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-
entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-
entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
:type redis_configuration: dict[str, str]
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
:type enable_non_ssl_port: bool
:param replicas_per_master: The number of replicas to be created per master.
:type replicas_per_master: int
:param tenant_settings: A dictionary of tenant settings.
:type tenant_settings: dict[str, str]
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:type shard_count: int
:param minimum_tls_version: Optional: requires clients to use a specified TLS version (or
higher) to connect (e.g. '1.0', '1.1', '1.2'). Possible values include: "1.0", "1.1", "1.2".
:type minimum_tls_version: str or ~azure.mgmt.redis.models.TlsVersion
:param sku: The SKU of the Redis cache to deploy.
:type sku: ~azure.mgmt.redis.models.Sku
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'redis_configuration': {'key': 'properties.redisConfiguration', 'type': '{str}'},
'enable_non_ssl_port': {'key': 'properties.enableNonSslPort', 'type': 'bool'},
'replicas_per_master': {'key': 'properties.replicasPerMaster', 'type': 'int'},
'tenant_settings': {'key': 'properties.tenantSettings', 'type': '{str}'},
'shard_count': {'key': 'properties.shardCount', 'type': 'int'},
'minimum_tls_version': {'key': 'properties.minimumTlsVersion', 'type': 'str'},
'sku': {'key': 'properties.sku', 'type': 'Sku'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
redis_configuration: Optional[Dict[str, str]] = None,
enable_non_ssl_port: Optional[bool] = None,
replicas_per_master: Optional[int] = None,
tenant_settings: Optional[Dict[str, str]] = None,
shard_count: Optional[int] = None,
minimum_tls_version: Optional[Union[str, "TlsVersion"]] = None,
sku: Optional["Sku"] = None,
**kwargs
):
super(RedisUpdateParameters, self).__init__(**kwargs)
self.tags = tags
self.redis_configuration = redis_configuration
self.enable_non_ssl_port = enable_non_ssl_port
self.replicas_per_master = replicas_per_master
self.tenant_settings = tenant_settings
self.shard_count = shard_count
self.minimum_tls_version = minimum_tls_version
self.sku = sku
class RedisUpdateProperties(RedisCommonProperties):
"""Patchable properties of the redis cache.
    :param redis_configuration: All Redis Settings. A few possible keys: rdb-backup-enabled,rdb-
storage-connection-string,rdb-backup-frequency,maxmemory-delta,maxmemory-policy,notify-
keyspace-events,maxmemory-samples,slowlog-log-slower-than,slowlog-max-len,list-max-ziplist-
entries,list-max-ziplist-value,hash-max-ziplist-entries,hash-max-ziplist-value,set-max-intset-
entries,zset-max-ziplist-entries,zset-max-ziplist-value etc.
:type redis_configuration: dict[str, str]
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled.
:type enable_non_ssl_port: bool
:param replicas_per_master: The number of replicas to be created per master.
:type replicas_per_master: int
:param tenant_settings: A dictionary of tenant settings.
:type tenant_settings: dict[str, str]
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:type shard_count: int
:param minimum_tls_version: Optional: requires clients to use a specified TLS version (or
     higher) to connect (e.g. '1.0', '1.1', '1.2'). Possible values include: "1.0", "1.1", "1.2".
:type minimum_tls_version: str or ~azure.mgmt.redis.models.TlsVersion
:param sku: The SKU of the Redis cache to deploy.
:type sku: ~azure.mgmt.redis.models.Sku
"""
_attribute_map = {
'redis_configuration': {'key': 'redisConfiguration', 'type': '{str}'},
'enable_non_ssl_port': {'key': 'enableNonSslPort', 'type': 'bool'},
'replicas_per_master': {'key': 'replicasPerMaster', 'type': 'int'},
'tenant_settings': {'key': 'tenantSettings', 'type': '{str}'},
'shard_count': {'key': 'shardCount', 'type': 'int'},
'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
redis_configuration: Optional[Dict[str, str]] = None,
enable_non_ssl_port: Optional[bool] = None,
replicas_per_master: Optional[int] = None,
tenant_settings: Optional[Dict[str, str]] = None,
shard_count: Optional[int] = None,
minimum_tls_version: Optional[Union[str, "TlsVersion"]] = None,
sku: Optional["Sku"] = None,
**kwargs
):
super(RedisUpdateProperties, self).__init__(redis_configuration=redis_configuration, enable_non_ssl_port=enable_non_ssl_port, replicas_per_master=replicas_per_master, tenant_settings=tenant_settings, shard_count=shard_count, minimum_tls_version=minimum_tls_version, **kwargs)
self.sku = sku
class ScheduleEntry(msrest.serialization.Model):
"""Patch schedule entry for a Premium Redis Cache.
All required parameters must be populated in order to send to Azure.
:param day_of_week: Required. Day of the week when a cache can be patched. Possible values
include: "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday",
"Everyday", "Weekend".
:type day_of_week: str or ~azure.mgmt.redis.models.DayOfWeek
:param start_hour_utc: Required. Start hour after which cache patching can start.
:type start_hour_utc: int
:param maintenance_window: ISO8601 timespan specifying how much time cache patching can take.
:type maintenance_window: ~datetime.timedelta
"""
_validation = {
'day_of_week': {'required': True},
'start_hour_utc': {'required': True},
}
_attribute_map = {
'day_of_week': {'key': 'dayOfWeek', 'type': 'str'},
'start_hour_utc': {'key': 'startHourUtc', 'type': 'int'},
'maintenance_window': {'key': 'maintenanceWindow', 'type': 'duration'},
}
def __init__(
self,
*,
day_of_week: Union[str, "DayOfWeek"],
start_hour_utc: int,
maintenance_window: Optional[datetime.timedelta] = None,
**kwargs
):
super(ScheduleEntry, self).__init__(**kwargs)
self.day_of_week = day_of_week
self.start_hour_utc = start_hour_utc
self.maintenance_window = maintenance_window
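# Illustrative sketch (values are examples only): a weekly patch window built
# from the ScheduleEntry model above.
#     ScheduleEntry(day_of_week="Saturday", start_hour_utc=2,
#                   maintenance_window=datetime.timedelta(hours=5))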
class Sku(msrest.serialization.Model):
"""SKU parameters supplied to the create Redis operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. The type of Redis cache to deploy. Valid values: (Basic, Standard,
Premium). Possible values include: "Basic", "Standard", "Premium".
:type name: str or ~azure.mgmt.redis.models.SkuName
:param family: Required. The SKU family to use. Valid values: (C, P). (C = Basic/Standard, P =
Premium). Possible values include: "C", "P".
:type family: str or ~azure.mgmt.redis.models.SkuFamily
:param capacity: Required. The size of the Redis cache to deploy. Valid values: for C
(Basic/Standard) family (0, 1, 2, 3, 4, 5, 6), for P (Premium) family (1, 2, 3, 4).
:type capacity: int
"""
_validation = {
'name': {'required': True},
'family': {'required': True},
'capacity': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(
self,
*,
name: Union[str, "SkuName"],
family: Union[str, "SkuFamily"],
capacity: int,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = name
self.family = family
self.capacity = capacity
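# Illustrative sketch (values are examples only): combining the Sku above with
# RedisUpdateParameters for an update request.
#     sku = Sku(name="Premium", family="P", capacity=1)
#     params = RedisUpdateParameters(sku=sku, minimum_tls_version="1.2")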
class UpgradeNotification(msrest.serialization.Model):
"""Properties of upgrade notification.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of upgrade notification.
:vartype name: str
:ivar timestamp: Timestamp when upgrade notification occurred.
:vartype timestamp: ~datetime.datetime
:ivar upsell_notification: Details about this upgrade notification.
:vartype upsell_notification: dict[str, str]
"""
_validation = {
'name': {'readonly': True},
'timestamp': {'readonly': True},
'upsell_notification': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'upsell_notification': {'key': 'upsellNotification', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(UpgradeNotification, self).__init__(**kwargs)
self.name = None
self.timestamp = None
self.upsell_notification = None
|
djkonro/client-python | refs/heads/master | kubernetes/test/test_v1_token_review_spec.py | 2 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_token_review_spec import V1TokenReviewSpec
class TestV1TokenReviewSpec(unittest.TestCase):
""" V1TokenReviewSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1TokenReviewSpec(self):
"""
Test V1TokenReviewSpec
"""
model = kubernetes.client.models.v1_token_review_spec.V1TokenReviewSpec()
if __name__ == '__main__':
unittest.main()
|
siskin/bluetooth-next | refs/heads/feature-rpl | tools/perf/scripts/python/check-perf-trace.py | 1997 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
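# One possible way to exercise it (illustrative; event selection may vary):
#
#   perf record -e irq:softirq_entry -e kmem:kmalloc -ag -- sleep 1
#   perf script -s check-perf-trace.py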
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
billyhunt/osf.io | refs/heads/develop | scripts/consistency/impute_log_date.py | 64 | """Due to an unknown bug, a handful of logs were saved without dates. This
script identifies logs without dates and imputes dates using ObjectIds.
Dry run: python -m scripts.consistency.impute_log_date
Real: python -m scripts.consistency.impute_log_date false
"""
from bson import ObjectId
from website.app import init_app
from website import models
from framework import Q
app = init_app()
def impute_log_date(dry_run=True):
no_date = models.NodeLog.find(
Q('date', 'eq', None)
)
for log in no_date:
oid = ObjectId(log._primary_key)
imputed_date = oid.generation_time
print u'Imputing date {} for log ID {}'.format(
imputed_date.strftime('%c'),
log._primary_key,
)
if not dry_run:
log._fields['date'].__set__(log, imputed_date, safe=True)
log.save()
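# The imputation above works because the first four bytes of a BSON ObjectId
# encode its creation time, e.g. (illustrative id):
#     ObjectId('5349b4ddd2781d08c09890f3').generation_time  # -> timezone-aware datetime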
if __name__ == '__main__':
import sys
dry_run = len(sys.argv) == 1 or sys.argv[1].lower() not in ['f', 'false']
impute_log_date(dry_run=dry_run)
|
javierder/dogestart.me | refs/heads/master | django/bin/daily_cleanup.py | 113 | #!/usr/bin/env python
"""
Daily cleanup job.
Can be run as a cronjob to clean out old data from the database (only expired
sessions at the moment).
"""
import warnings
from django.core import management
if __name__ == "__main__":
warnings.warn(
"The `daily_cleanup` script has been deprecated "
"in favor of `django-admin.py clearsessions`.",
DeprecationWarning)
management.call_command('clearsessions')
|
siasoft/hokkeist | refs/heads/master | backend/web/coloradmin/plugins/jquery-file-upload/server/gae-python/main.py | 242 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.1.1
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json
import re
import urllib
import webapp2
WEBSITE = 'http://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1 # bytes
MAX_FILE_SIZE = 5000000 # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80' # max width / height
EXPIRATION_TIME = 300 # seconds
def cleanup(blob_keys):
blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
def initialize(self, request, response):
super(UploadHandler, self).initialize(request, response)
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.headers[
'Access-Control-Allow-Methods'
] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
self.response.headers[
'Access-Control-Allow-Headers'
] = 'Content-Type, Content-Range, Content-Disposition'
def validate(self, file):
if file['size'] < MIN_FILE_SIZE:
file['error'] = 'File is too small'
elif file['size'] > MAX_FILE_SIZE:
file['error'] = 'File is too big'
elif not ACCEPT_FILE_TYPES.match(file['type']):
file['error'] = 'Filetype not allowed'
else:
return True
return False
def get_file_size(self, file):
file.seek(0, 2) # Seek to the end of the file
size = file.tell() # Get the position of EOF
file.seek(0) # Reset the file position to the beginning
return size
def write_blob(self, data, info):
blob = files.blobstore.create(
mime_type=info['type'],
_blobinfo_uploaded_filename=info['name']
)
with files.open(blob, 'a') as f:
f.write(data)
files.finalize(blob)
return files.blobstore.get_blob_key(blob)
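    # handle_upload() builds the per-file dicts that the jQuery File Upload
    # client consumes; post() wraps them as (roughly, illustrative):
    #     {"files": [{"name": ..., "size": ..., "type": ..., "url": ...,
    #                 "thumbnailUrl": ..., "deleteUrl": ..., "deleteType": "DELETE"}]}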
def handle_upload(self):
results = []
blob_keys = []
for name, fieldStorage in self.request.POST.items():
if type(fieldStorage) is unicode:
continue
result = {}
result['name'] = re.sub(
r'^.*\\',
'',
fieldStorage.filename
)
result['type'] = fieldStorage.type
result['size'] = self.get_file_size(fieldStorage.file)
if self.validate(result):
blob_key = str(
self.write_blob(fieldStorage.value, result)
)
blob_keys.append(blob_key)
result['deleteType'] = 'DELETE'
result['deleteUrl'] = self.request.host_url +\
'/?key=' + urllib.quote(blob_key, '')
if (IMAGE_TYPES.match(result['type'])):
try:
result['url'] = images.get_serving_url(
blob_key,
secure_url=self.request.host_url.startswith(
'https'
)
)
result['thumbnailUrl'] = result['url'] +\
THUMBNAIL_MODIFICATOR
except: # Could not get an image serving url
pass
if not 'url' in result:
result['url'] = self.request.host_url +\
'/' + blob_key + '/' + urllib.quote(
result['name'].encode('utf-8'), '')
results.append(result)
deferred.defer(
cleanup,
blob_keys,
_countdown=EXPIRATION_TIME
)
return results
def options(self):
pass
def head(self):
pass
def get(self):
self.redirect(WEBSITE)
def post(self):
if (self.request.get('_method') == 'DELETE'):
return self.delete()
result = {'files': self.handle_upload()}
s = json.dumps(result, separators=(',', ':'))
redirect = self.request.get('redirect')
if redirect:
return self.redirect(str(
redirect.replace('%s', urllib.quote(s, ''), 1)
))
if 'application/json' in self.request.headers.get('Accept'):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(s)
def delete(self):
key = self.request.get('key') or ''
blobstore.delete(key)
s = json.dumps({key: True}, separators=(',', ':'))
if 'application/json' in self.request.headers.get('Accept'):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(s)
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, key, filename):
if not blobstore.get(key):
self.error(404)
else:
# Prevent browsers from MIME-sniffing the content-type:
self.response.headers['X-Content-Type-Options'] = 'nosniff'
# Cache for the expiration time:
self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME
# Send the file forcing a download dialog:
self.send_blob(key, save_as=filename, content_type='application/octet-stream')
app = webapp2.WSGIApplication(
[
('/', UploadHandler),
('/([^/]+)/([^/]+)', DownloadHandler)
],
debug=True
)
|
simplyguru-dot/ansible | refs/heads/devel | lib/ansible/playbook/conditional.py | 48 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.exceptions import UndefinedError
from six import text_type
from ansible.errors import *
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
class Conditional:
'''
This is a mix-in class, to be used with Base to allow the object
to be run conditionally when a condition is met or skipped.
'''
_when = FieldAttribute(isa='list', default=[])
def __init__(self, loader=None):
# when used directly, this class needs a loader, but we want to
# make sure we don't trample on the existing one if this class
# is used as a mix-in with a playbook base class
if not hasattr(self, '_loader'):
if loader is None:
raise AnsibleError("a loader must be specified when using Conditional() directly")
else:
self._loader = loader
super(Conditional, self).__init__()
def _validate_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [ value ])
def evaluate_conditional(self, templar, all_vars):
'''
Loops through the conditionals set on this object, returning
False if any of them evaluate as such.
'''
# since this is a mixin, it may not have an underlying datastructure
# associated with it, so we pull it out now in case we need it for
# error reporting below
ds = None
if hasattr(self, '_ds'):
ds = getattr(self, '_ds')
try:
for conditional in self.when:
if not self._check_conditional(conditional, templar, all_vars):
return False
except Exception as e:
raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (conditional, e), obj=ds)
return True
def _check_conditional(self, conditional, templar, all_vars):
'''
This method does the low-level evaluation of each conditional
set on this object, using jinja2 to wrap the conditionals for
evaluation.
'''
original = conditional
if conditional is None or conditional == '':
return True
if conditional in all_vars and '-' not in text_type(all_vars[conditional]):
conditional = all_vars[conditional]
        # make sure the templar is using the variables specified to this method
templar.set_available_variables(variables=all_vars)
conditional = templar.template(conditional)
if not isinstance(conditional, basestring) or conditional == "":
return conditional
# a Jinja2 evaluation that results in something Python can eval!
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
conditional = templar.template(presented, fail_on_undefined=False)
val = conditional.strip()
if val == presented:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
# otherwise fail
if "is undefined" in original:
return True
elif "is defined" in original:
return False
else:
raise AnsibleError("error while evaluating conditional: %s (%s)" % (original, presented))
elif val == "True":
return True
elif val == "False":
return False
else:
raise AnsibleError("unable to evaluate conditional: %s" % original)
|
phe-bioinformatics/snapperdb | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
from distutils.core import setup
import os
import sys
import pip
from pip.req import parse_requirements
def get_version():
version_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), "VERSION")
version = "N/A"
if os.path.exists(version_file):
try:
with open(version_file) as fp:
version = fp.next().strip()
except IOError:
pass
return version
# At the time of writing there is an open issue on pip > 6.0
# Where session is a required parameter. Breaks backwards compatibility.
if int(pip.__version__.split(".")[0]) >= 6:
install_reqs = parse_requirements('requirements.txt', session=False)
else:
install_reqs = parse_requirements('requirements.txt')
install_requires = [str(ir.req) for ir in install_reqs]
setup(name='snapperdb',
version=get_version(),
description='Public Health England(UK) SNP calling pipeline tools.',
author='Public Health England',
author_email='[email protected]',
url='https://github.com/phe-bioinformatics/snapperdb/',
download_url='https://github.com/phe-bioinformatics/snapperdb/archive/v1.0.6.tar.gz',
packages=['snapperdb','snapperdb.snpdb','snapperdb.gbru_vcf'],
include_package_data=True,
package_data={'snpdb': ['snapperdb/snpdb/template_snapperdb_denovo_refs_sql']},
scripts=['run_snapperdb.py'],
install_requires=install_requires
)
|
CloudWareChile/OpenChile | refs/heads/master | openerp/addons/audittrail/__init__.py | 65 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import audittrail
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kennedyshead/home-assistant | refs/heads/dev | homeassistant/components/demo/sensor.py | 2 | """Demo platform that has a couple of fake sensors."""
from homeassistant.components.sensor import STATE_CLASS_MEASUREMENT, SensorEntity
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_CO,
DEVICE_CLASS_CO2,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
from . import DOMAIN
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Demo sensors."""
async_add_entities(
[
DemoSensor(
"sensor_1",
"Outside Temperature",
15.6,
DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT,
TEMP_CELSIUS,
12,
),
DemoSensor(
"sensor_2",
"Outside Humidity",
54,
DEVICE_CLASS_HUMIDITY,
STATE_CLASS_MEASUREMENT,
PERCENTAGE,
None,
),
DemoSensor(
"sensor_3",
"Carbon monoxide",
54,
DEVICE_CLASS_CO,
STATE_CLASS_MEASUREMENT,
CONCENTRATION_PARTS_PER_MILLION,
None,
),
DemoSensor(
"sensor_4",
"Carbon dioxide",
54,
DEVICE_CLASS_CO2,
STATE_CLASS_MEASUREMENT,
CONCENTRATION_PARTS_PER_MILLION,
14,
),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoSensor(SensorEntity):
"""Representation of a Demo sensor."""
def __init__(
self,
unique_id,
name,
state,
device_class,
state_class,
unit_of_measurement,
battery,
):
"""Initialize the sensor."""
self._battery = battery
self._device_class = device_class
self._name = name
self._state = state
self._state_class = state_class
self._unique_id = unique_id
self._unit_of_measurement = unit_of_measurement
@property
def device_info(self):
"""Return device info."""
return {
"identifiers": {
# Serial numbers are unique identifiers within a specific domain
(DOMAIN, self.unique_id)
},
"name": self.name,
}
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def should_poll(self):
"""No polling needed for a demo sensor."""
return False
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def state_class(self):
"""Return the state class of the sensor."""
return self._state_class
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if self._battery:
return {ATTR_BATTERY_LEVEL: self._battery}
|
samuelhavron/heroku-buildpack-python | refs/heads/master | Python-3.4.3/Lib/mailbox.py | 72 | """Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
# Notes for authors of new mailbox subclasses:
#
# Remember to fsync() changes to disk before closing a modified file
# or returning from a flush() method. See functions _sync_flush() and
# _sync_close().
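# A minimal usage sketch (illustrative; the path is an example):
#
#     import mailbox
#     mb = mailbox.mbox('/tmp/example.mbox')
#     mb.lock()
#     try:
#         key = mb.add('From: [email protected]\n\nhello\n')
#         print(mb.get_string(key))
#     finally:
#         mb.unlock()
#         mb.close()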
import os
import time
import calendar
import socket
import errno
import copy
import warnings
import email
import email.message
import email.generator
import io
import contextlib
try:
import fcntl
except ImportError:
fcntl = None
__all__ = [ 'Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
'BabylMessage', 'MMDFMessage']
linesep = os.linesep.encode('ascii')
class Mailbox:
"""A group of messages in a particular place."""
def __init__(self, path, factory=None, create=True):
"""Initialize a Mailbox instance."""
self._path = os.path.abspath(os.path.expanduser(path))
self._factory = factory
def add(self, message):
"""Add message and return assigned key."""
raise NotImplementedError('Method must be implemented by subclass')
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def __delitem__(self, key):
self.remove(key)
def discard(self, key):
"""If the keyed message exists, remove it."""
try:
self.remove(key)
except KeyError:
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def get(self, key, default=None):
"""Return the keyed message, or default if it doesn't exist."""
try:
return self.__getitem__(key)
except KeyError:
return default
def __getitem__(self, key):
"""Return the keyed message; raise KeyError if it doesn't exist."""
if not self._factory:
return self.get_message(key)
else:
with contextlib.closing(self.get_file(key)) as file:
return self._factory(file)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_string(self, key):
"""Return a string representation or raise a KeyError.
Uses email.message.Message to create a 7bit clean string
representation of the message."""
return email.message_from_bytes(self.get_bytes(key)).as_string()
def get_bytes(self, key):
"""Return a byte string representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def iterkeys(self):
"""Return an iterator over keys."""
raise NotImplementedError('Method must be implemented by subclass')
def keys(self):
"""Return a list of keys."""
return list(self.iterkeys())
def itervalues(self):
"""Return an iterator over all messages."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield value
def __iter__(self):
return self.itervalues()
def values(self):
"""Return a list of messages. Memory intensive."""
return list(self.itervalues())
def iteritems(self):
"""Return an iterator over (key, message) tuples."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield (key, value)
def items(self):
"""Return a list of (key, message) tuples. Memory intensive."""
return list(self.iteritems())
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
raise NotImplementedError('Method must be implemented by subclass')
def __len__(self):
"""Return a count of messages in the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def clear(self):
"""Delete all messages."""
for key in self.keys():
self.discard(key)
def pop(self, key, default=None):
"""Delete the keyed message and return it, or default."""
try:
result = self[key]
except KeyError:
return default
self.discard(key)
return result
def popitem(self):
"""Delete an arbitrary (key, message) pair and return it."""
for key in self.iterkeys():
return (key, self.pop(key)) # This is only run once.
else:
raise KeyError('No messages in mailbox')
def update(self, arg=None):
"""Change the messages that correspond to certain keys."""
if hasattr(arg, 'iteritems'):
source = arg.iteritems()
elif hasattr(arg, 'items'):
source = arg.items()
else:
source = arg
bad_key = False
for key, message in source:
try:
self[key] = message
except KeyError:
bad_key = True
if bad_key:
raise KeyError('No message with key(s)')
def flush(self):
"""Write any pending changes to the disk."""
raise NotImplementedError('Method must be implemented by subclass')
def lock(self):
"""Lock the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def unlock(self):
"""Unlock the mailbox if it is locked."""
raise NotImplementedError('Method must be implemented by subclass')
def close(self):
"""Flush and close the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def _string_to_bytes(self, message):
# If a message is not 7bit clean, we refuse to handle it since it
# likely came from reading invalid messages in text mode, and that way
# lies mojibake.
try:
return message.encode('ascii')
except UnicodeError:
raise ValueError("String input must be ASCII-only; "
"use bytes or a Message instead")
# Whether each message must end in a newline
_append_newline = False
def _dump_message(self, message, target, mangle_from_=False):
# This assumes the target file is open in binary mode.
"""Dump message contents to target file."""
if isinstance(message, email.message.Message):
buffer = io.BytesIO()
gen = email.generator.BytesGenerator(buffer, mangle_from_, 0)
gen.flatten(message)
buffer.seek(0)
data = buffer.read()
data = data.replace(b'\n', linesep)
target.write(data)
if self._append_newline and not data.endswith(linesep):
# Make sure the message ends with a newline
target.write(linesep)
elif isinstance(message, (str, bytes, io.StringIO)):
if isinstance(message, io.StringIO):
warnings.warn("Use of StringIO input is deprecated, "
"use BytesIO instead", DeprecationWarning, 3)
message = message.getvalue()
if isinstance(message, str):
message = self._string_to_bytes(message)
if mangle_from_:
message = message.replace(b'\nFrom ', b'\n>From ')
message = message.replace(b'\n', linesep)
target.write(message)
if self._append_newline and not message.endswith(linesep):
# Make sure the message ends with a newline
target.write(linesep)
elif hasattr(message, 'read'):
if hasattr(message, 'buffer'):
warnings.warn("Use of text mode files is deprecated, "
"use a binary mode file instead", DeprecationWarning, 3)
message = message.buffer
lastline = None
while True:
line = message.readline()
# Universal newline support.
if line.endswith(b'\r\n'):
line = line[:-2] + b'\n'
elif line.endswith(b'\r'):
line = line[:-1] + b'\n'
if not line:
break
if mangle_from_ and line.startswith(b'From '):
line = b'>From ' + line[5:]
line = line.replace(b'\n', linesep)
target.write(line)
lastline = line
if self._append_newline and lastline and not lastline.endswith(linesep):
# Make sure the message ends with a newline
target.write(linesep)
else:
raise TypeError('Invalid message type: %s' % type(message))
class Maildir(Mailbox):
"""A qmail-style Maildir mailbox."""
colon = ':'
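    # On filesystems that do not allow ':' in file names this separator can be
    # overridden, e.g. (illustrative) Maildir.colon = '!'.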
def __init__(self, dirname, factory=None, create=True):
"""Initialize a Maildir instance."""
Mailbox.__init__(self, dirname, factory, create)
self._paths = {
'tmp': os.path.join(self._path, 'tmp'),
'new': os.path.join(self._path, 'new'),
'cur': os.path.join(self._path, 'cur'),
}
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0o700)
for path in self._paths.values():
os.mkdir(path, 0o700)
else:
raise NoSuchMailboxError(self._path)
self._toc = {}
self._toc_mtimes = {'cur': 0, 'new': 0}
self._last_read = 0 # Records last time we read cur/new
self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing
def add(self, message):
"""Add message and return assigned key."""
tmp_file = self._create_tmp()
try:
self._dump_message(message, tmp_file)
except BaseException:
tmp_file.close()
os.remove(tmp_file.name)
raise
_sync_close(tmp_file)
if isinstance(message, MaildirMessage):
subdir = message.get_subdir()
suffix = self.colon + message.get_info()
if suffix == self.colon:
suffix = ''
else:
subdir = 'new'
suffix = ''
uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
dest = os.path.join(self._path, subdir, uniq + suffix)
if isinstance(message, MaildirMessage):
os.utime(tmp_file.name,
(os.path.getatime(tmp_file.name), message.get_date()))
# No file modification should be done after the file is moved to its
# final position in order to prevent race conditions with changes
# from other programs
try:
if hasattr(os, 'link'):
os.link(tmp_file.name, dest)
os.remove(tmp_file.name)
else:
os.rename(tmp_file.name, dest)
except OSError as e:
os.remove(tmp_file.name)
if e.errno == errno.EEXIST:
raise ExternalClashError('Name clash with existing message: %s'
% dest)
else:
raise
return uniq
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
os.remove(os.path.join(self._path, self._lookup(key)))
def discard(self, key):
"""If the keyed message exists, remove it."""
# This overrides an inapplicable implementation in the superclass.
try:
self.remove(key)
except (KeyError, FileNotFoundError):
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
old_subpath = self._lookup(key)
temp_key = self.add(message)
temp_subpath = self._lookup(temp_key)
if isinstance(message, MaildirMessage):
# temp's subdir and suffix were specified by message.
dominant_subpath = temp_subpath
else:
# temp's subdir and suffix were defaults from add().
dominant_subpath = old_subpath
subdir = os.path.dirname(dominant_subpath)
if self.colon in dominant_subpath:
suffix = self.colon + dominant_subpath.split(self.colon)[-1]
else:
suffix = ''
self.discard(key)
tmp_path = os.path.join(self._path, temp_subpath)
new_path = os.path.join(self._path, subdir, key + suffix)
if isinstance(message, MaildirMessage):
os.utime(tmp_path,
(os.path.getatime(tmp_path), message.get_date()))
# No file modification should be done after the file is moved to its
# final position in order to prevent race conditions with changes
# from other programs
os.rename(tmp_path, new_path)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
subpath = self._lookup(key)
with open(os.path.join(self._path, subpath), 'rb') as f:
if self._factory:
msg = self._factory(f)
else:
msg = MaildirMessage(f)
subdir, name = os.path.split(subpath)
msg.set_subdir(subdir)
if self.colon in name:
msg.set_info(name.split(self.colon)[-1])
msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
return msg
def get_bytes(self, key):
"""Return a bytes representation or raise a KeyError."""
with open(os.path.join(self._path, self._lookup(key)), 'rb') as f:
return f.read().replace(linesep, b'\n')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'rb')
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
self._refresh()
for key in self._toc:
try:
self._lookup(key)
except KeyError:
continue
yield key
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._refresh()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._refresh()
return len(self._toc)
def flush(self):
"""Write any pending changes to disk."""
# Maildir changes are always written immediately, so there's nothing
# to do.
pass
def lock(self):
"""Lock the mailbox."""
return
def unlock(self):
"""Unlock the mailbox if it is locked."""
return
def close(self):
"""Flush and close the mailbox."""
return
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if len(entry) > 1 and entry[0] == '.' and \
os.path.isdir(os.path.join(self._path, entry)):
result.append(entry[1:])
return result
def get_folder(self, folder):
"""Return a Maildir instance for the named folder."""
return Maildir(os.path.join(self._path, '.' + folder),
factory=self._factory,
create=False)
def add_folder(self, folder):
"""Create a folder and return a Maildir instance representing it."""
path = os.path.join(self._path, '.' + folder)
result = Maildir(path, factory=self._factory)
maildirfolder_path = os.path.join(path, 'maildirfolder')
if not os.path.exists(maildirfolder_path):
os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY,
0o666))
return result
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, '.' + folder)
for entry in os.listdir(os.path.join(path, 'new')) + \
os.listdir(os.path.join(path, 'cur')):
if len(entry) < 1 or entry[0] != '.':
raise NotEmptyError('Folder contains message(s): %s' % folder)
for entry in os.listdir(path):
if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
os.path.isdir(os.path.join(path, entry)):
raise NotEmptyError("Folder contains subdirectory '%s': %s" %
(folder, entry))
for root, dirs, files in os.walk(path, topdown=False):
for entry in files:
os.remove(os.path.join(root, entry))
for entry in dirs:
os.rmdir(os.path.join(root, entry))
os.rmdir(path)
def clean(self):
"""Delete old files in "tmp"."""
now = time.time()
for entry in os.listdir(os.path.join(self._path, 'tmp')):
path = os.path.join(self._path, 'tmp', entry)
if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
os.remove(path)
_count = 1 # This is used to generate unique file names.
def _create_tmp(self):
"""Create a file in the tmp subdirectory and open and return it."""
now = time.time()
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
Maildir._count, hostname)
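        # The resulting name looks like (illustrative):
        #     1700000000.M123456P4321Q7.somehost
        # i.e. <seconds>.M<microseconds>P<pid>Q<counter>.<hostname>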
path = os.path.join(self._path, 'tmp', uniq)
try:
os.stat(path)
except FileNotFoundError:
Maildir._count += 1
try:
return _create_carefully(path)
except FileExistsError:
pass
# Fall through to here if stat succeeded or open raised EEXIST.
raise ExternalClashError('Name clash prevented file creation: %s' %
path)
def _refresh(self):
"""Update table of contents mapping."""
# If it has been less than two seconds since the last _refresh() call,
# we have to unconditionally re-read the mailbox just in case it has
        # been modified, because os.path.getmtime() has a 2 sec resolution in the
# most common worst case (FAT) and a 1 sec resolution typically. This
# results in a few unnecessary re-reads when _refresh() is called
# multiple times in that interval, but once the clock ticks over, we
# will only re-read as needed. Because the filesystem might be being
# served by an independent system with its own clock, we record and
# compare with the mtimes from the filesystem. Because the other
# system's clock might be skewing relative to our clock, we add an
# extra delta to our wait. The default is one tenth second, but is an
# instance variable and so can be adjusted if dealing with a
# particularly skewed or irregular system.
if time.time() - self._last_read > 2 + self._skewfactor:
refresh = False
for subdir in self._toc_mtimes:
mtime = os.path.getmtime(self._paths[subdir])
if mtime > self._toc_mtimes[subdir]:
refresh = True
self._toc_mtimes[subdir] = mtime
if not refresh:
return
# Refresh toc
self._toc = {}
for subdir in self._toc_mtimes:
path = self._paths[subdir]
for entry in os.listdir(path):
p = os.path.join(path, entry)
if os.path.isdir(p):
continue
uniq = entry.split(self.colon)[0]
self._toc[uniq] = os.path.join(subdir, entry)
self._last_read = time.time()
def _lookup(self, key):
"""Use TOC to return subpath for given key, or raise a KeyError."""
try:
if os.path.exists(os.path.join(self._path, self._toc[key])):
return self._toc[key]
except KeyError:
pass
self._refresh()
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
# This method is for backward compatibility only.
def next(self):
"""Return the next message in a one-time iteration."""
if not hasattr(self, '_onetime_keys'):
self._onetime_keys = self.iterkeys()
while True:
try:
return self[next(self._onetime_keys)]
except StopIteration:
return None
except KeyError:
continue
class _singlefileMailbox(Mailbox):
"""A single-file mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize a single-file mailbox."""
Mailbox.__init__(self, path, factory, create)
try:
f = open(self._path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
if create:
f = open(self._path, 'wb+')
else:
raise NoSuchMailboxError(self._path)
elif e.errno in (errno.EACCES, errno.EROFS):
f = open(self._path, 'rb')
else:
raise
self._file = f
self._toc = None
self._next_key = 0
self._pending = False # No changes require rewriting the file.
self._pending_sync = False # No need to sync the file
self._locked = False
self._file_length = None # Used to record mailbox size
def add(self, message):
"""Add message and return assigned key."""
self._lookup()
self._toc[self._next_key] = self._append_message(message)
self._next_key += 1
# _append_message appends the message to the mailbox file. We
# don't need a full rewrite + rename, sync is enough.
self._pending_sync = True
return self._next_key - 1
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
del self._toc[key]
self._pending = True
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
self._toc[key] = self._append_message(message)
self._pending = True
def iterkeys(self):
"""Return an iterator over keys."""
self._lookup()
yield from self._toc.keys()
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._lookup()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._lookup()
return len(self._toc)
def lock(self):
"""Lock the mailbox."""
if not self._locked:
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
self._locked = False
def flush(self):
"""Write any pending changes to disk."""
if not self._pending:
if self._pending_sync:
# Messages have only been added, so syncing the file
# is enough.
_sync_flush(self._file)
self._pending_sync = False
return
# In order to be writing anything out at all, self._toc must
# already have been generated (and presumably has been modified
# by adding or deleting an item).
assert self._toc is not None
# Check length of self._file; if it's changed, some other process
# has modified the mailbox since we scanned it.
self._file.seek(0, 2)
cur_len = self._file.tell()
if cur_len != self._file_length:
raise ExternalClashError('Size of mailbox file changed '
'(expected %i, found %i)' %
(self._file_length, cur_len))
new_file = _create_temporary(self._path)
try:
new_toc = {}
self._pre_mailbox_hook(new_file)
for key in sorted(self._toc.keys()):
start, stop = self._toc[key]
self._file.seek(start)
self._pre_message_hook(new_file)
new_start = new_file.tell()
while True:
buffer = self._file.read(min(4096,
stop - self._file.tell()))
if not buffer:
break
new_file.write(buffer)
new_toc[key] = (new_start, new_file.tell())
self._post_message_hook(new_file)
self._file_length = new_file.tell()
except:
new_file.close()
os.remove(new_file.name)
raise
_sync_close(new_file)
# self._file is about to get replaced, so no need to sync.
self._file.close()
# Make sure the new file's mode is the same as the old file's
mode = os.stat(self._path).st_mode
os.chmod(new_file.name, mode)
try:
os.rename(new_file.name, self._path)
except FileExistsError:
os.remove(self._path)
os.rename(new_file.name, self._path)
self._file = open(self._path, 'rb+')
self._toc = new_toc
self._pending = False
self._pending_sync = False
if self._locked:
_lock_file(self._file, dotlock=False)
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
return
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
return
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
return
def close(self):
"""Flush and close the mailbox."""
self.flush()
if self._locked:
self.unlock()
self._file.close() # Sync has been done by self.flush() above.
def _lookup(self, key=None):
"""Return (start, stop) or raise KeyError."""
if self._toc is None:
self._generate_toc()
if key is not None:
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
def _append_message(self, message):
"""Append message to mailbox and return (start, stop) offsets."""
self._file.seek(0, 2)
before = self._file.tell()
if len(self._toc) == 0 and not self._pending:
# This is the first message, and the _pre_mailbox_hook
# hasn't yet been called. If self._pending is True,
# messages have been removed, so _pre_mailbox_hook must
# have been called already.
self._pre_mailbox_hook(self._file)
try:
self._pre_message_hook(self._file)
offsets = self._install_message(message)
self._post_message_hook(self._file)
except BaseException:
self._file.truncate(before)
raise
self._file.flush()
self._file_length = self._file.tell() # Record current length of mailbox
return offsets
class _mboxMMDF(_singlefileMailbox):
"""An mbox or MMDF mailbox."""
_mangle_from_ = True
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
from_line = self._file.readline().replace(linesep, b'')
string = self._file.read(stop - self._file.tell())
msg = self._message_factory(string.replace(linesep, b'\n'))
msg.set_from(from_line[5:].decode('ascii'))
return msg
def get_string(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
return email.message_from_bytes(
self.get_bytes(key)).as_string(unixfrom=from_)
def get_bytes(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
string = self._file.read(stop - self._file.tell())
return string.replace(linesep, b'\n')
def get_file(self, key, from_=False):
"""Return a file-like representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
return _PartialFile(self._file, self._file.tell(), stop)
def _install_message(self, message):
"""Format a message and blindly write to self._file."""
from_line = None
if isinstance(message, str):
message = self._string_to_bytes(message)
if isinstance(message, bytes) and message.startswith(b'From '):
newline = message.find(b'\n')
if newline != -1:
from_line = message[:newline]
message = message[newline + 1:]
else:
from_line = message
message = b''
elif isinstance(message, _mboxMMDFMessage):
author = message.get_from().encode('ascii')
from_line = b'From ' + author
elif isinstance(message, email.message.Message):
from_line = message.get_unixfrom() # May be None.
if from_line is not None:
from_line = from_line.encode('ascii')
if from_line is None:
from_line = b'From MAILER-DAEMON ' + time.asctime(time.gmtime()).encode()
start = self._file.tell()
self._file.write(from_line + linesep)
self._dump_message(message, self._file, self._mangle_from_)
stop = self._file.tell()
return (start, stop)
class mbox(_mboxMMDF):
"""A classic mbox mailbox."""
_mangle_from_ = True
# All messages must end in a newline character, and
# _post_message_hooks outputs an empty line between messages.
_append_newline = True
def __init__(self, path, factory=None, create=True):
"""Initialize an mbox mailbox."""
self._message_factory = mboxMessage
_mboxMMDF.__init__(self, path, factory, create)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
last_was_empty = False
self._file.seek(0)
while True:
line_pos = self._file.tell()
line = self._file.readline()
if line.startswith(b'From '):
if len(stops) < len(starts):
if last_was_empty:
stops.append(line_pos - len(linesep))
else:
# The last line before the "From " line wasn't
# blank, but we consider it a start of a
# message anyway.
stops.append(line_pos)
starts.append(line_pos)
last_was_empty = False
elif not line:
if last_was_empty:
stops.append(line_pos - len(linesep))
else:
stops.append(line_pos)
break
elif line == linesep:
last_was_empty = True
else:
last_was_empty = False
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file_length = self._file.tell()
class MMDF(_mboxMMDF):
"""An MMDF mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MMDF mailbox."""
self._message_factory = MMDFMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write(b'\001\001\001\001' + linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(linesep + b'\001\001\001\001' + linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line.startswith(b'\001\001\001\001' + linesep):
starts.append(next_pos)
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == b'\001\001\001\001' + linesep:
stops.append(line_pos - len(linesep))
break
elif not line:
stops.append(line_pos)
break
elif not line:
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
class MH(Mailbox):
"""An MH mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MH instance."""
Mailbox.__init__(self, path, factory, create)
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0o700)
os.close(os.open(os.path.join(self._path, '.mh_sequences'),
os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600))
else:
raise NoSuchMailboxError(self._path)
self._locked = False
def add(self, message):
"""Add message and return assigned key."""
keys = self.keys()
if len(keys) == 0:
new_key = 1
else:
new_key = max(keys) + 1
new_path = os.path.join(self._path, str(new_key))
f = _create_carefully(new_path)
closed = False
try:
if self._locked:
_lock_file(f)
try:
try:
self._dump_message(message, f)
except BaseException:
# Unlock and close so it can be deleted on Windows
if self._locked:
_unlock_file(f)
_sync_close(f)
closed = True
os.remove(new_path)
raise
if isinstance(message, MHMessage):
self._dump_sequences(message, new_key)
finally:
if self._locked:
_unlock_file(f)
finally:
if not closed:
_sync_close(f)
return new_key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
else:
f.close()
os.remove(path)
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'rb+')
else:
f = open(os.path.join(self._path, str(key)), 'rb')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
with f:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
for name, key_list in self.get_sequences().items():
if key in key_list:
msg.add_sequence(name)
return msg
def get_bytes(self, key):
"""Return a bytes representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'rb+')
else:
f = open(os.path.join(self._path, str(key)), 'rb')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
with f:
if self._locked:
_lock_file(f)
try:
return f.read().replace(linesep, b'\n')
finally:
if self._locked:
_unlock_file(f)
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
try:
f = open(os.path.join(self._path, str(key)), 'rb')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
return iter(sorted(int(entry) for entry in os.listdir(self._path)
if entry.isdigit()))
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
return os.path.exists(os.path.join(self._path, str(key)))
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(list(self.iterkeys()))
def lock(self):
"""Lock the mailbox."""
if not self._locked:
self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
_sync_close(self._file)
del self._file
self._locked = False
def flush(self):
"""Write any pending changes to the disk."""
return
def close(self):
"""Flush and close the mailbox."""
if self._locked:
self.unlock()
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if os.path.isdir(os.path.join(self._path, entry)):
result.append(entry)
return result
def get_folder(self, folder):
"""Return an MH instance for the named folder."""
return MH(os.path.join(self._path, folder),
factory=self._factory, create=False)
def add_folder(self, folder):
"""Create a folder and return an MH instance representing it."""
return MH(os.path.join(self._path, folder),
factory=self._factory)
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, folder)
entries = os.listdir(path)
if entries == ['.mh_sequences']:
os.remove(os.path.join(path, '.mh_sequences'))
elif entries == []:
pass
else:
raise NotEmptyError('Folder not empty: %s' % self._path)
os.rmdir(path)
def get_sequences(self):
"""Return a name-to-key-list dictionary to define each sequence."""
results = {}
with open(os.path.join(self._path, '.mh_sequences'), 'r', encoding='ASCII') as f:
all_keys = set(self.keys())
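            # Each line of .mh_sequences looks like 'flagged: 1 3-5 11'; ranges expand to individual message keys.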
for line in f:
try:
name, contents = line.split(':')
keys = set()
for spec in contents.split():
if spec.isdigit():
keys.add(int(spec))
else:
start, stop = (int(x) for x in spec.split('-'))
keys.update(range(start, stop + 1))
                    results[name] = [key for key in sorted(keys)
                                     if key in all_keys]
if len(results[name]) == 0:
del results[name]
except ValueError:
raise FormatError('Invalid sequence specification: %s' %
line.rstrip())
return results
def set_sequences(self, sequences):
"""Set sequences using the given name-to-key-list dictionary."""
f = open(os.path.join(self._path, '.mh_sequences'), 'r+', encoding='ASCII')
try:
os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
for name, keys in sequences.items():
if len(keys) == 0:
continue
f.write(name + ':')
prev = None
completing = False
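                # Write keys space-separated, collapsing consecutive runs into 'start-stop' ranges (e.g. '1-5 7 9-12').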
for key in sorted(set(keys)):
if key - 1 == prev:
if not completing:
completing = True
f.write('-')
elif completing:
completing = False
f.write('%s %s' % (prev, key))
else:
f.write(' %s' % key)
prev = key
if completing:
f.write(str(prev) + '\n')
else:
f.write('\n')
finally:
_sync_close(f)
def pack(self):
"""Re-name messages to eliminate numbering gaps. Invalidates keys."""
sequences = self.get_sequences()
prev = 0
changes = []
for key in self.iterkeys():
if key - 1 != prev:
changes.append((key, prev + 1))
if hasattr(os, 'link'):
os.link(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
os.unlink(os.path.join(self._path, str(key)))
else:
os.rename(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
prev += 1
self._next_key = prev + 1
if len(changes) == 0:
return
for name, key_list in sequences.items():
for old, new in changes:
if old in key_list:
key_list[key_list.index(old)] = new
self.set_sequences(sequences)
def _dump_sequences(self, message, key):
"""Inspect a new MHMessage and update sequences appropriately."""
pending_sequences = message.get_sequences()
all_sequences = self.get_sequences()
for name, key_list in all_sequences.items():
if name in pending_sequences:
key_list.append(key)
elif key in key_list:
del key_list[key_list.index(key)]
for sequence in pending_sequences:
if sequence not in all_sequences:
all_sequences[sequence] = [key]
self.set_sequences(all_sequences)
class Babyl(_singlefileMailbox):
"""An Rmail-style Babyl mailbox."""
_special_labels = frozenset(('unseen', 'deleted', 'filed', 'answered',
'forwarded', 'edited', 'resent'))
def __init__(self, path, factory=None, create=True):
"""Initialize a Babyl mailbox."""
_singlefileMailbox.__init__(self, path, factory, create)
self._labels = {}
def add(self, message):
"""Add message and return assigned key."""
key = _singlefileMailbox.add(self, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
return key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.remove(self, key)
if key in self._labels:
del self._labels[key]
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.__setitem__(self, key, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip b'1,' line specifying labels.
original_headers = io.BytesIO()
while True:
line = self._file.readline()
if line == b'*** EOOH ***' + linesep or not line:
break
original_headers.write(line.replace(linesep, b'\n'))
visible_headers = io.BytesIO()
while True:
line = self._file.readline()
if line == linesep or not line:
break
visible_headers.write(line.replace(linesep, b'\n'))
# Read up to the stop, or to the end
n = stop - self._file.tell()
assert n >= 0
body = self._file.read(n)
body = body.replace(linesep, b'\n')
msg = BabylMessage(original_headers.getvalue() + body)
msg.set_visible(visible_headers.getvalue())
if key in self._labels:
msg.set_labels(self._labels[key])
return msg
def get_bytes(self, key):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip b'1,' line specifying labels.
original_headers = io.BytesIO()
while True:
line = self._file.readline()
if line == b'*** EOOH ***' + linesep or not line:
break
original_headers.write(line.replace(linesep, b'\n'))
while True:
line = self._file.readline()
if line == linesep or not line:
break
headers = original_headers.getvalue()
n = stop - self._file.tell()
assert n >= 0
data = self._file.read(n)
data = data.replace(linesep, b'\n')
return headers + data
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return io.BytesIO(self.get_bytes(key).replace(b'\n', linesep))
def get_labels(self):
"""Return a list of user-defined labels in the mailbox."""
self._lookup()
labels = set()
for label_list in self._labels.values():
labels.update(label_list)
labels.difference_update(self._special_labels)
return list(labels)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
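        # A '\037\014' line marks the start of each message; a bare '\037' (or end of file) closes the previous one.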
label_lists = []
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == b'\037\014' + linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(linesep))
starts.append(next_pos)
labels = [label.strip() for label
in self._file.readline()[1:].split(b',')
if label.strip()]
label_lists.append(labels)
elif line == b'\037' or line == b'\037' + linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(linesep))
elif not line:
stops.append(line_pos - len(linesep))
break
self._toc = dict(enumerate(zip(starts, stops)))
self._labels = dict(enumerate(label_lists))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
babyl = b'BABYL OPTIONS:' + linesep
babyl += b'Version: 5' + linesep
labels = self.get_labels()
labels = (label.encode() for label in labels)
babyl += b'Labels:' + b','.join(labels) + linesep
babyl += b'\037'
f.write(babyl)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write(b'\014' + linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(linesep + b'\037')
def _install_message(self, message):
"""Write message contents and return (start, stop)."""
start = self._file.tell()
if isinstance(message, BabylMessage):
special_labels = []
labels = []
for label in message.get_labels():
if label in self._special_labels:
special_labels.append(label)
else:
labels.append(label)
self._file.write(b'1')
for label in special_labels:
self._file.write(b', ' + label.encode())
self._file.write(b',,')
for label in labels:
self._file.write(b' ' + label.encode() + b',')
self._file.write(linesep)
else:
self._file.write(b'1,,' + linesep)
if isinstance(message, email.message.Message):
orig_buffer = io.BytesIO()
orig_generator = email.generator.BytesGenerator(orig_buffer, False, 0)
orig_generator.flatten(message)
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
break
self._file.write(b'*** EOOH ***' + linesep)
if isinstance(message, BabylMessage):
vis_buffer = io.BytesIO()
vis_generator = email.generator.BytesGenerator(vis_buffer, False, 0)
vis_generator.flatten(message.get_visible())
while True:
line = vis_buffer.readline()
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
break
else:
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
break
while True:
buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
if not buffer:
break
self._file.write(buffer.replace(b'\n', linesep))
elif isinstance(message, (bytes, str, io.StringIO)):
if isinstance(message, io.StringIO):
warnings.warn("Use of StringIO input is deprecated, "
"use BytesIO instead", DeprecationWarning, 3)
message = message.getvalue()
if isinstance(message, str):
message = self._string_to_bytes(message)
body_start = message.find(b'\n\n') + 2
if body_start - 2 != -1:
self._file.write(message[:body_start].replace(b'\n', linesep))
self._file.write(b'*** EOOH ***' + linesep)
self._file.write(message[:body_start].replace(b'\n', linesep))
self._file.write(message[body_start:].replace(b'\n', linesep))
else:
self._file.write(b'*** EOOH ***' + linesep + linesep)
self._file.write(message.replace(b'\n', linesep))
elif hasattr(message, 'readline'):
if hasattr(message, 'buffer'):
warnings.warn("Use of text mode files is deprecated, "
"use a binary mode file instead", DeprecationWarning, 3)
message = message.buffer
original_pos = message.tell()
first_pass = True
while True:
line = message.readline()
# Universal newline support.
if line.endswith(b'\r\n'):
line = line[:-2] + b'\n'
elif line.endswith(b'\r'):
line = line[:-1] + b'\n'
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
if first_pass:
first_pass = False
self._file.write(b'*** EOOH ***' + linesep)
message.seek(original_pos)
else:
break
while True:
line = message.readline()
if not line:
break
# Universal newline support.
if line.endswith(b'\r\n'):
line = line[:-2] + linesep
elif line.endswith(b'\r'):
line = line[:-1] + linesep
elif line.endswith(b'\n'):
line = line[:-1] + linesep
self._file.write(line)
else:
raise TypeError('Invalid message type: %s' % type(message))
stop = self._file.tell()
return (start, stop)
class Message(email.message.Message):
"""Message with mailbox-format-specific properties."""
def __init__(self, message=None):
"""Initialize a Message instance."""
if isinstance(message, email.message.Message):
self._become_message(copy.deepcopy(message))
if isinstance(message, Message):
message._explain_to(self)
elif isinstance(message, bytes):
self._become_message(email.message_from_bytes(message))
elif isinstance(message, str):
self._become_message(email.message_from_string(message))
elif isinstance(message, io.TextIOWrapper):
self._become_message(email.message_from_file(message))
elif hasattr(message, "read"):
self._become_message(email.message_from_binary_file(message))
elif message is None:
email.message.Message.__init__(self)
else:
raise TypeError('Invalid message type: %s' % type(message))
def _become_message(self, message):
"""Assume the non-format-specific state of message."""
type_specific = getattr(message, '_type_specific_attributes', [])
for name in message.__dict__:
if name not in type_specific:
self.__dict__[name] = message.__dict__[name]
def _explain_to(self, message):
"""Copy format-specific state to message insofar as possible."""
if isinstance(message, Message):
return # There's nothing format-specific to explain.
else:
raise TypeError('Cannot convert to specified type')
class MaildirMessage(Message):
"""Message with Maildir-specific properties."""
_type_specific_attributes = ['_subdir', '_info', '_date']
def __init__(self, message=None):
"""Initialize a MaildirMessage instance."""
self._subdir = 'new'
self._info = ''
self._date = time.time()
Message.__init__(self, message)
def get_subdir(self):
"""Return 'new' or 'cur'."""
return self._subdir
def set_subdir(self, subdir):
"""Set subdir to 'new' or 'cur'."""
if subdir == 'new' or subdir == 'cur':
self._subdir = subdir
else:
raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
def get_flags(self):
"""Return as a string the flags that are set."""
if self._info.startswith('2,'):
return self._info[2:]
else:
return ''
def set_flags(self, flags):
"""Set the given flags and unset all others."""
self._info = '2,' + ''.join(sorted(flags))
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if self.get_flags():
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def get_date(self):
"""Return delivery date of message, in seconds since the epoch."""
return self._date
def set_date(self, date):
"""Set delivery date of message, in seconds since the epoch."""
try:
self._date = float(date)
except ValueError:
raise TypeError("can't convert to float: %s" % date)
def get_info(self):
"""Get the message's "info" as a string."""
return self._info
def set_info(self, info):
"""Set the message's "info" string."""
if isinstance(info, str):
self._info = info
else:
raise TypeError('info must be a string: %s' % type(info))
def _explain_to(self, message):
"""Copy Maildir-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
message.set_flags(self.get_flags())
message.set_subdir(self.get_subdir())
message.set_date(self.get_date())
elif isinstance(message, _mboxMMDFMessage):
flags = set(self.get_flags())
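            # Maildir -> mbox/MMDF flag translation: S (seen) -> R (read), T (trashed) -> D (deleted),
            # F -> F (flagged), R (replied) -> A (answered); messages already in 'cur' gain O (old).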
if 'S' in flags:
message.add_flag('R')
if self.get_subdir() == 'cur':
message.add_flag('O')
if 'T' in flags:
message.add_flag('D')
if 'F' in flags:
message.add_flag('F')
if 'R' in flags:
message.add_flag('A')
message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_sequence('unseen')
if 'R' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_label('unseen')
if 'T' in flags:
message.add_label('deleted')
if 'R' in flags:
message.add_label('answered')
if 'P' in flags:
message.add_label('forwarded')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class _mboxMMDFMessage(Message):
"""Message with mbox- or MMDF-specific properties."""
_type_specific_attributes = ['_from']
def __init__(self, message=None):
"""Initialize an mboxMMDFMessage instance."""
self.set_from('MAILER-DAEMON', True)
if isinstance(message, email.message.Message):
unixfrom = message.get_unixfrom()
if unixfrom is not None and unixfrom.startswith('From '):
self.set_from(unixfrom[5:])
Message.__init__(self, message)
def get_from(self):
"""Return contents of "From " line."""
return self._from
def set_from(self, from_, time_=None):
"""Set "From " line, formatting and appending time_ if specified."""
if time_ is not None:
if time_ is True:
time_ = time.gmtime()
from_ += ' ' + time.asctime(time_)
self._from = from_
def get_flags(self):
"""Return as a string the flags that are set."""
return self.get('Status', '') + self.get('X-Status', '')
def set_flags(self, flags):
"""Set the given flags and unset all others."""
flags = set(flags)
status_flags, xstatus_flags = '', ''
for flag in ('R', 'O'):
if flag in flags:
status_flags += flag
flags.remove(flag)
for flag in ('D', 'F', 'A'):
if flag in flags:
xstatus_flags += flag
flags.remove(flag)
xstatus_flags += ''.join(sorted(flags))
try:
self.replace_header('Status', status_flags)
except KeyError:
self.add_header('Status', status_flags)
try:
self.replace_header('X-Status', xstatus_flags)
except KeyError:
self.add_header('X-Status', xstatus_flags)
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if 'Status' in self or 'X-Status' in self:
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def _explain_to(self, message):
"""Copy mbox- or MMDF-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
flags = set(self.get_flags())
if 'O' in flags:
message.set_subdir('cur')
if 'F' in flags:
message.add_flag('F')
if 'A' in flags:
message.add_flag('R')
if 'R' in flags:
message.add_flag('S')
if 'D' in flags:
message.add_flag('T')
del message['status']
del message['x-status']
maybe_date = ' '.join(self.get_from().split()[-5:])
try:
message.set_date(calendar.timegm(time.strptime(maybe_date,
'%a %b %d %H:%M:%S %Y')))
except (ValueError, OverflowError):
pass
elif isinstance(message, _mboxMMDFMessage):
message.set_flags(self.get_flags())
message.set_from(self.get_from())
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_sequence('unseen')
if 'A' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
del message['status']
del message['x-status']
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_label('unseen')
if 'D' in flags:
message.add_label('deleted')
if 'A' in flags:
message.add_label('answered')
del message['status']
del message['x-status']
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class mboxMessage(_mboxMMDFMessage):
"""Message with mbox-specific properties."""
class MHMessage(Message):
"""Message with MH-specific properties."""
_type_specific_attributes = ['_sequences']
def __init__(self, message=None):
"""Initialize an MHMessage instance."""
self._sequences = []
Message.__init__(self, message)
def get_sequences(self):
"""Return a list of sequences that include the message."""
return self._sequences[:]
def set_sequences(self, sequences):
"""Set the list of sequences that include the message."""
self._sequences = list(sequences)
def add_sequence(self, sequence):
"""Add sequence to list of sequences including the message."""
if isinstance(sequence, str):
if not sequence in self._sequences:
self._sequences.append(sequence)
else:
raise TypeError('sequence type must be str: %s' % type(sequence))
def remove_sequence(self, sequence):
"""Remove sequence from the list of sequences including the message."""
try:
self._sequences.remove(sequence)
except ValueError:
pass
def _explain_to(self, message):
"""Copy MH-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('R')
elif isinstance(message, _mboxMMDFMessage):
sequences = set(self.get_sequences())
if 'unseen' not in sequences:
message.add_flag('RO')
else:
message.add_flag('O')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('A')
elif isinstance(message, MHMessage):
for sequence in self.get_sequences():
message.add_sequence(sequence)
elif isinstance(message, BabylMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.add_label('unseen')
if 'replied' in sequences:
message.add_label('answered')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class BabylMessage(Message):
"""Message with Babyl-specific properties."""
_type_specific_attributes = ['_labels', '_visible']
def __init__(self, message=None):
"""Initialize an BabylMessage instance."""
self._labels = []
self._visible = Message()
Message.__init__(self, message)
def get_labels(self):
"""Return a list of labels on the message."""
return self._labels[:]
def set_labels(self, labels):
"""Set the list of labels on the message."""
self._labels = list(labels)
def add_label(self, label):
"""Add label to list of labels on the message."""
if isinstance(label, str):
if label not in self._labels:
self._labels.append(label)
else:
raise TypeError('label must be a string: %s' % type(label))
def remove_label(self, label):
"""Remove label from the list of labels on the message."""
try:
self._labels.remove(label)
except ValueError:
pass
def get_visible(self):
"""Return a Message representation of visible headers."""
return Message(self._visible)
def set_visible(self, visible):
"""Set the Message representation of visible headers."""
self._visible = Message(visible)
def update_visible(self):
"""Update and/or sensibly generate a set of visible headers."""
for header in self._visible.keys():
if header in self:
self._visible.replace_header(header, self[header])
else:
del self._visible[header]
for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
if header in self and header not in self._visible:
self._visible[header] = self[header]
def _explain_to(self, message):
"""Copy Babyl-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'forwarded' in labels or 'resent' in labels:
message.add_flag('P')
if 'answered' in labels:
message.add_flag('R')
if 'deleted' in labels:
message.add_flag('T')
elif isinstance(message, _mboxMMDFMessage):
labels = set(self.get_labels())
if 'unseen' not in labels:
message.add_flag('RO')
else:
message.add_flag('O')
if 'deleted' in labels:
message.add_flag('D')
if 'answered' in labels:
message.add_flag('A')
elif isinstance(message, MHMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.add_sequence('unseen')
if 'answered' in labels:
message.add_sequence('replied')
elif isinstance(message, BabylMessage):
message.set_visible(self.get_visible())
for label in self.get_labels():
message.add_label(label)
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class MMDFMessage(_mboxMMDFMessage):
"""Message with MMDF-specific properties."""
class _ProxyFile:
"""A read-only wrapper of a file."""
def __init__(self, f, pos=None):
"""Initialize a _ProxyFile."""
self._file = f
if pos is None:
self._pos = f.tell()
else:
self._pos = pos
def read(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read)
def read1(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read1)
def readline(self, size=None):
"""Read a line."""
return self._read(size, self._file.readline)
def readlines(self, sizehint=None):
"""Read multiple lines."""
result = []
for line in self:
result.append(line)
if sizehint is not None:
sizehint -= len(line)
if sizehint <= 0:
break
return result
def __iter__(self):
"""Iterate over lines."""
while True:
line = self.readline()
if not line:
                return
yield line
def tell(self):
"""Return the position."""
return self._pos
def seek(self, offset, whence=0):
"""Change position."""
if whence == 1:
self._file.seek(self._pos)
self._file.seek(offset, whence)
self._pos = self._file.tell()
def close(self):
"""Close the file."""
if hasattr(self, '_file'):
if hasattr(self._file, 'close'):
self._file.close()
del self._file
def _read(self, size, read_method):
"""Read size bytes using read_method."""
if size is None:
size = -1
self._file.seek(self._pos)
result = read_method(size)
self._pos = self._file.tell()
return result
def __enter__(self):
"""Context management protocol support."""
return self
def __exit__(self, *exc):
self.close()
def readable(self):
return self._file.readable()
def writable(self):
return self._file.writable()
def seekable(self):
return self._file.seekable()
def flush(self):
return self._file.flush()
@property
def closed(self):
if not hasattr(self, '_file'):
return True
if not hasattr(self._file, 'closed'):
return False
return self._file.closed
class _PartialFile(_ProxyFile):
"""A read-only wrapper of part of a file."""
def __init__(self, f, start=None, stop=None):
"""Initialize a _PartialFile."""
_ProxyFile.__init__(self, f, start)
self._start = start
self._stop = stop
def tell(self):
"""Return the position with respect to start."""
return _ProxyFile.tell(self) - self._start
def seek(self, offset, whence=0):
"""Change position, possibly with respect to start or stop."""
if whence == 0:
self._pos = self._start
whence = 1
elif whence == 2:
self._pos = self._stop
whence = 1
_ProxyFile.seek(self, offset, whence)
def _read(self, size, read_method):
"""Read size bytes using read_method, honoring start and stop."""
remaining = self._stop - self._pos
if remaining <= 0:
return b''
if size is None or size < 0 or size > remaining:
size = remaining
return _ProxyFile._read(self, size, read_method)
def close(self):
# do *not* close the underlying file object for partial files,
# since it's global to the mailbox object
if hasattr(self, '_file'):
del self._file
def _lock_file(f, dotlock=True):
"""Lock file f using lockf and dot locking."""
dotlock_done = False
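    # Locking is two-stage: an fcntl lockf() lock on the open file plus a '.lock' dot file,
    # created atomically via a hard link (or a rename where links are unavailable) so it also
    # works over NFS. If either stage fails, whatever was acquired is rolled back below.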
try:
if fcntl:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError as e:
if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS):
raise ExternalClashError('lockf: lock unavailable: %s' %
f.name)
else:
raise
if dotlock:
try:
pre_lock = _create_temporary(f.name + '.lock')
pre_lock.close()
except OSError as e:
if e.errno in (errno.EACCES, errno.EROFS):
return # Without write access, just skip dotlocking.
else:
raise
try:
if hasattr(os, 'link'):
os.link(pre_lock.name, f.name + '.lock')
dotlock_done = True
os.unlink(pre_lock.name)
else:
os.rename(pre_lock.name, f.name + '.lock')
dotlock_done = True
except FileExistsError:
os.remove(pre_lock.name)
raise ExternalClashError('dot lock unavailable: %s' %
f.name)
except:
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if dotlock_done:
os.remove(f.name + '.lock')
raise
def _unlock_file(f):
"""Unlock file f using lockf and dot locking."""
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if os.path.exists(f.name + '.lock'):
os.remove(f.name + '.lock')
def _create_carefully(path):
"""Create a file if it doesn't exist and open for reading and writing."""
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o666)
try:
return open(path, 'rb+')
finally:
os.close(fd)
def _create_temporary(path):
"""Create a temp file based on path and open for reading and writing."""
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
socket.gethostname(),
os.getpid()))
def _sync_flush(f):
"""Ensure changes to file f are physically on disk."""
f.flush()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
def _sync_close(f):
"""Close file f, ensuring all changes are physically on disk."""
_sync_flush(f)
f.close()
class Error(Exception):
"""Raised for module-specific errors."""
class NoSuchMailboxError(Error):
"""The specified mailbox does not exist and won't be created."""
class NotEmptyError(Error):
"""The specified mailbox is not empty and deletion was requested."""
class ExternalClashError(Error):
"""Another process caused an action to fail."""
class FormatError(Error):
"""A file appears to have an invalid format."""
|
MiLk/ansible | refs/heads/devel | lib/ansible/modules/monitoring/monit.py | 28 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Darryl Stoflet <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: monit
short_description: Manage the state of a program monitored via Monit
description:
- Manage the state of a program monitored via I(Monit)
version_added: "1.2"
options:
name:
description:
- The name of the I(monit) program/process to manage
required: true
default: null
state:
description:
- The state of service
required: true
default: null
choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
timeout:
description:
- If there are pending actions for the service monitored by monit, then Ansible will check
        for up to this many seconds to verify that the requested action has been performed.
Ansible will sleep for five seconds between each check.
required: false
default: 300
version_added: "2.1"
requirements: [ ]
author: "Darryl Stoflet (@dstoflet)"
'''
EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
- monit:
name: httpd
state: started
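
# Editor addition - an illustrative sketch only; the service name and timeout are placeholders.
# Stop monitoring "httpd", waiting up to 60 seconds for any pending monit action to settle.
- monit:
    name: httpd
    state: unmonitored
    timeout: 60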
'''
import time
def main():
arg_spec = dict(
name=dict(required=True),
timeout=dict(default=300, type='int'),
state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
)
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
state = module.params['state']
timeout = module.params['timeout']
MONIT = module.get_bin_path('monit', True)
def status():
"""Return the status of the process in monit, or the empty string if not present."""
rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True)
for line in out.split('\n'):
# Sample output lines:
# Process 'name' Running
# Process 'name' Running - restart pending
parts = line.split()
if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
return ' '.join(parts[2:]).lower()
else:
return ''
def run_command(command):
"""Runs a monit command, and returns the new status."""
module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
return status()
def wait_for_monit_to_stop_pending():
"""Fails this run if there is no status or it's pending/initializing for timeout"""
timeout_time = time.time() + timeout
sleep_time = 5
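        # Poll 'monit summary' every 5 seconds until the service leaves the pending/initializing state or the timeout expires.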
running_status = status()
while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
if time.time() >= timeout_time:
module.fail_json(
                    msg='waited too long for "pending" or "initializing" status to go away ({0})'.format(
running_status
),
state=state
)
time.sleep(sleep_time)
running_status = status()
if state == 'reloaded':
if module.check_mode:
module.exit_json(changed=True)
rc, out, err = module.run_command('%s reload' % MONIT)
if rc != 0:
module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
present = status() != ''
if not present and not state == 'present':
module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)
if state == 'present':
if not present:
if module.check_mode:
module.exit_json(changed=True)
status = run_command('reload')
if status == '':
wait_for_monit_to_stop_pending()
module.exit_json(changed=True, name=name, state=state)
module.exit_json(changed=False, name=name, state=state)
wait_for_monit_to_stop_pending()
running = 'running' in status()
if running and state in ['started', 'monitored']:
module.exit_json(changed=False, name=name, state=state)
if running and state == 'stopped':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('stop')
if status in ['not monitored'] or 'stop pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not stopped' % name, status=status)
if running and state == 'unmonitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('unmonitor')
if status in ['not monitored'] or 'unmonitor pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not unmonitored' % name, status=status)
elif state == 'restarted':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('restart')
if status in ['initializing', 'running'] or 'restart pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not restarted' % name, status=status)
elif not running and state == 'started':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('start')
if status in ['initializing', 'running'] or 'start pending' in status:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not started' % name, status=status)
elif not running and state == 'monitored':
if module.check_mode:
module.exit_json(changed=True)
status = run_command('monitor')
if status not in ['not monitored']:
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg='%s process not monitored' % name, status=status)
module.exit_json(changed=False, name=name, state=state)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
diagramsoftware/odoo | refs/heads/8.0 | addons/sale_layout/models/sale_layout.py | 58 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from itertools import groupby
def grouplines(self, ordered_lines, sortkey):
"""Return lines from a specified invoice or sale order grouped by category"""
grouped_lines = []
for key, valuesiter in groupby(ordered_lines, sortkey):
group = {}
group['category'] = key
group['lines'] = list(v for v in valuesiter)
if 'subtotal' in key and key.subtotal is True:
group['subtotal'] = sum(line.price_subtotal for line in group['lines'])
grouped_lines.append(group)
return grouped_lines
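# Illustrative sketch of the structure grouplines() returns (editor note; the values are invented):
# [{'category': <sale_layout.category record or ''>,
#   'lines': [<invoice/order line records>],
#   'subtotal': 1234.5},          # present only when the category requests a subtotal
#  ...]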
class SaleLayoutCategory(osv.Model):
_name = 'sale_layout.category'
_order = 'sequence, id'
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True),
'subtotal': fields.boolean('Add subtotal'),
'separator': fields.boolean('Add separator'),
'pagebreak': fields.boolean('Add pagebreak')
}
_defaults = {
'subtotal': True,
'separator': True,
'pagebreak': False,
'sequence': 10
}
class AccountInvoice(osv.Model):
_inherit = 'account.invoice'
def sale_layout_lines(self, cr, uid, ids, invoice_id=None, context=None):
"""
Returns invoice lines from a specified invoice ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'invoice_id' (int): specify the concerned invoice.
"""
ordered_lines = self.browse(cr, uid, invoice_id, context=context).invoice_line
        # Group lines by their layout category; lines with no category fall back to an empty key.
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
import openerp
class AccountInvoiceLine(osv.Model):
_inherit = 'account.invoice.line'
_order = 'invoice_id, categ_sequence, sequence, id'
sale_layout_cat_id = openerp.fields.Many2one('sale_layout.category', string='Section')
categ_sequence = openerp.fields.Integer(related='sale_layout_cat_id.sequence',
string='Layout Sequence', store=True)
class SaleOrder(osv.Model):
_inherit = 'sale.order'
def sale_layout_lines(self, cr, uid, ids, order_id=None, context=None):
"""
Returns order lines from a specified sale ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'order_id' (int): specify the concerned sale order.
"""
ordered_lines = self.browse(cr, uid, order_id, context=context).order_line
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class SaleOrderLine(osv.Model):
_inherit = 'sale.order.line'
_columns = {
'sale_layout_cat_id': fields.many2one('sale_layout.category',
string='Section'),
'categ_sequence': fields.related('sale_layout_cat_id',
'sequence', type='integer',
string='Layout Sequence', store=True)
# Store is intentionally set in order to keep the "historic" order.
}
_order = 'order_id, categ_sequence, sale_layout_cat_id, sequence, id'
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Save the layout when converting to an invoice line."""
invoice_vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)
if line.sale_layout_cat_id:
invoice_vals['sale_layout_cat_id'] = line.sale_layout_cat_id.id
if line.categ_sequence:
invoice_vals['categ_sequence'] = line.categ_sequence
return invoice_vals
|
jml/flocker | refs/heads/master | flocker/control/test/__init__.py | 15 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``flocker.control``.
"""
|
yfried/ansible | refs/heads/devel | lib/ansible/modules/network/f5/bigip_gtm_monitor_tcp.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_monitor_tcp
short_description: Manages F5 BIG-IP GTM tcp monitors
description:
- Manages F5 BIG-IP GTM tcp monitors.
version_added: 2.6
options:
name:
description:
- Monitor name.
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp)
parent on the C(Common) partition.
default: /Common/tcp
send:
description:
- The send string for the monitor call.
receive:
description:
- The receive string for the monitor call.
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, then a C(port) number must be specified.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
        must be specified.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run.
- If this parameter is not provided when creating a new monitor, then the
default value will be 30.
- This value B(must) be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second.
- If this parameter is not provided when creating a new monitor, then the
default value will be 120.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
probe_timeout:
description:
- Specifies the number of seconds after which the system times out the probe request
to the system.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(5).
ignore_down_response:
description:
- Specifies that the monitor allows more than one probe attempt per interval.
- When C(yes), specifies that the monitor ignores down responses for the duration of
the monitor timeout. Once the monitor timeout is reached without the system receiving
an up response, the system marks the object down.
- When C(no), specifies that the monitor immediately marks an object down when it
receives a down response.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(no).
type: bool
transparent:
description:
- Specifies whether the monitor operates in transparent mode.
- A monitor in transparent mode directs traffic through the associated pool members
or nodes (usually a router or firewall) to the aliased destination (that is, it
probes the C(ip)-C(port) combination specified in the monitor).
- If the monitor cannot successfully reach the aliased destination, the pool member
or node through which the monitor traffic was sent is marked down.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(no).
type: bool
reverse:
description:
- Instructs the system to mark the target resource down when the test is successful.
        This setting is useful if, for example, the content of your web site home page is
        dynamic and changes frequently; in that case you may want to set up a reverse ECV
        service check that looks for the string Error.
- A match for this string means that the web server was down.
- To use this option, you must specify values for C(send) and C(receive).
type: bool
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a GTM TCP monitor
bigip_gtm_monitor_tcp:
name: my_monitor
ip: 1.1.1.1
port: 80
send: my send string
receive: my receive string
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Remove TCP Monitor
bigip_gtm_monitor_tcp:
name: my_monitor
state: absent
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add TCP monitor for all addresses, port 514
bigip_gtm_monitor_tcp:
name: my_monitor
server: lb.mydomain.com
user: admin
port: 514
password: secret
delegate_to: localhost
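
# Editor addition - an illustrative sketch using only the options documented above;
# host, credentials and values are placeholders.
- name: Create a transparent TCP monitor with a custom probe timeout
  bigip_gtm_monitor_tcp:
    name: my_transparent_monitor
    ip: 10.10.10.10
    port: 443
    transparent: yes
    probe_timeout: 10
    ignore_down_response: yes
    server: lb.mydomain.com
    user: admin
    password: secret
  delegate_to: localhost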
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: tcp
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: 10.12.13.14
port:
description: The new port the monitor checks the resource on.
returned: changed
type: string
sample: 8080
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
ignore_down_response:
description: Whether to ignore the down response or not.
returned: changed
type: bool
sample: True
send:
description: The new send string for this monitor.
returned: changed
type: string
sample: tcp string to send
receive:
description: The new receive string for this monitor.
returned: changed
type: string
sample: tcp string to receive
probe_timeout:
description: The new timeout in which the system will timeout the monitor probe.
returned: changed
type: int
sample: 10
reverse:
description: The new value for whether the monitor operates in reverse mode.
returned: changed
type: bool
sample: False
transparent:
description: The new value for whether the monitor operates in transparent mode.
returned: changed
type: bool
sample: False
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import module_provisioned
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import module_provisioned
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
api_map = {
'defaultsFrom': 'parent',
'ignoreDownResponse': 'ignore_down_response',
'probeTimeout': 'probe_timeout',
'recv': 'receive',
}
api_attributes = [
'defaultsFrom',
'interval',
'timeout',
'destination',
'transparent',
'probeTimeout',
'ignoreDownResponse',
'reverse',
'send',
'recv',
]
returnables = [
'parent',
'ip',
'port',
'interval',
'timeout',
'transparent',
'probe_timeout',
'ignore_down_response',
'send',
'receive',
'reverse',
]
updatables = [
'destination',
'interval',
'timeout',
'transparent',
'probe_timeout',
'ignore_down_response',
'send',
'receive',
'reverse',
'ip',
'port',
]
class ApiParameters(Parameters):
@property
def ip(self):
ip, port = self._values['destination'].split(':')
return ip
@property
def port(self):
ip, port = self._values['destination'].split(':')
try:
return int(port)
except ValueError:
return port
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] is None:
return None
if self._values['ignore_down_response'] == 'disabled':
return False
return True
@property
def transparent(self):
if self._values['transparent'] is None:
return None
if self._values['transparent'] == 'disabled':
return False
return True
@property
def reverse(self):
if self._values['reverse'] is None:
return None
if self._values['reverse'] == 'disabled':
return False
return True
class ModuleParameters(Parameters):
@property
def interval(self):
if self._values['interval'] is None:
return None
        if not 1 <= int(self._values['interval']) <= 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
elif self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def port(self):
if self._values['port'] is None:
return None
elif self._values['port'] == '*':
return '*'
return int(self._values['port'])
@property
def destination(self):
if self.ip is None and self.port is None:
return None
destination = '{0}:{1}'.format(self.ip, self.port)
return destination
@destination.setter
def destination(self, value):
ip, port = value.split(':')
self._values['ip'] = ip
self._values['port'] = port
@property
def probe_timeout(self):
if self._values['probe_timeout'] is None:
return None
return int(self._values['probe_timeout'])
@property
def type(self):
return 'tcp'
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def transparent(self):
if self._values['transparent'] is None:
return None
elif self._values['transparent'] is True:
return 'enabled'
return 'disabled'
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] is None:
return None
elif self._values['ignore_down_response'] is True:
return 'enabled'
return 'disabled'
@property
def reverse(self):
if self._values['reverse'] is None:
return None
elif self._values['reverse'] is True:
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
@property
def ip(self):
ip, port = self._values['destination'].split(':')
return ip
@property
def port(self):
ip, port = self._values['destination'].split(':')
return int(port)
@property
def transparent(self):
if self._values['transparent'] == 'enabled':
return True
return False
@property
def ignore_down_response(self):
if self._values['ignore_down_response'] == 'enabled':
return True
return False
@property
def reverse(self):
if self._values['reverse'] == 'enabled':
return True
return False
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
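        # Generic comparison: report the desired value only when it differs from (or is missing on) the device.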
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None and self.want.port is None:
return None
if self.want.port is None:
self.want.update({'port': self.have.port})
if self.want.ip is None:
self.want.update({'ip': self.have.ip})
if self.want.port in [None, '*'] and self.want.ip != '*':
raise F5ModuleError(
"Specifying an IP address requires that a port number be specified"
)
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 120})
if self.want.interval is None:
self.want.update({'interval': 30})
if self.want.probe_timeout is None:
self.want.update({'probe_timeout': 5})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.want.port is None:
self.want.update({'port': '*'})
if self.want.ignore_down_response is None:
self.want.update({'ignore_down_response': False})
if self.want.transparent is None:
self.want.update({'transparent': False})
def exec_module(self):
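        # Entry point: require GTM to be provisioned, dispatch on the desired state, then report reportable changes.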
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_default_creation_values()
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/tcp/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name),
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/tcp'),
send=dict(),
receive=dict(),
ip=dict(),
port=dict(type='int'),
interval=dict(type='int'),
timeout=dict(type='int'),
ignore_down_response=dict(type='bool'),
transparent=dict(type='bool'),
probe_timeout=dict(type='int'),
reverse=dict(type='bool'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
mr-c/tools-iuc | refs/heads/master | tools/htseq/htseqsams2mx.py | 1 | # May 2013
# Change to htseq as the counting engine - wrap so arbitrary number of columns created
# borged Simon Anders' "count.py" since we need a vector of counts rather than a new sam file as output
# note attribution for htseq and count.py :
## Written by Simon Anders ([email protected]), European Molecular Biology
## Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General
## Public License v3. Part of the 'HTSeq' framework, version HTSeq-0.5.4p3
# updated ross lazarus august 2011 to NOT include region and to finesse the name as the region for bed3 format inputs
# also now sums all duplicate named regions and provides a summary of any collapsing as the info
# updated ross lazarus july 26 to respect the is_duplicate flag rather than try to second guess
# note Heng Li argues that removing dupes is a bad idea for RNA seq
# updated ross lazarus july 22 to count reads OUTSIDE each bed region during the processing of each bam
# added better sorting with decoration of a dict key later sorted and undecorated.
# code cleaned up and galaxified ross lazarus july 18 et seq.
# bams2mx.py -turns a series of bam and a bed file into a matrix of counts Usage bams2mx.py <halfwindow> <bedfile.bed> <bam1.bam>
# <bam2.bam>
# uses pysam to read and count bam reads over each bed interval for each sample for speed
# still not so fast
# TODO options -shift -unique
#
"""
how this gets run:
(vgalaxy)galaxy@iaas1-int:~$ cat database/job_working_directory/027/27014/galaxy_27014.sh
#!/bin/sh
GALAXY_LIB="/data/extended/galaxy/lib"
if [ "$GALAXY_LIB" != "None" ]; then
if [ -n "$PYTHONPATH" ]; then
PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"
else
PYTHONPATH="$GALAXY_LIB"
fi
export PYTHONPATH
fi
cd /data/extended/galaxy/database/job_working_directory/027/27014
python /data/extended/galaxy/tools/rgenetics/htseqsams2mx.py -g "/data/extended/galaxy/database/files/034/dataset_34115.dat" -o "/data/extended/galaxy/database/files/034/dataset_34124.dat" -m "union" --id_attribute "gene_id" --feature_type "exon" --samf "'/data/extended/galaxy/database/files/033/dataset_33980.dat','T5A_C1PPHACXX_AGTTCC_L003_R1.fastq_bwa.sam'" --samf "'/data/extended/galaxy/database/files/033/dataset_33975.dat','T5A_C1PPHACXX_AGTTCC_L002_R1.fastq_bwa.sam'"; cd /data/extended/galaxy; /data/extended/galaxy/set_metadata.sh ./database/files /data/extended/galaxy/database/job_working_directory/027/27014 . /data/extended/galaxy/universe_wsgi.ini /data/tmp/tmpmwsElH /data/extended/galaxy/database/job_working_directory/027/27014/galaxy.json /data/extended/galaxy/database/job_working_directory/027/27014/metadata_in_HistoryDatasetAssociation_45202_sfOMGa,/data/extended/galaxy/database/job_working_directory/027/27014/metadata_kwds_HistoryDatasetAssociation_45202_gaMnxa,/data/extended/galaxy/database/job_working_directory/027/27014/metadata_out_HistoryDatasetAssociation_45202_kZPsZO,/data/extended/galaxy/database/job_working_directory/027/27014/metadata_results_HistoryDatasetAssociation_45202_bXU7IU,,/data/extended/galaxy/database/job_working_directory/027/27014/metadata_override_HistoryDatasetAssociation_45202_hyLAvh
echo $? > /data/extended/galaxy/database/job_working_directory/027/27014/galaxy_27014.ec
"""
import os
import re
import sys
import HTSeq.scripts.count as htcount
import optparse
import tempfile
import shutil
import operator
import subprocess
import itertools
import warnings
import traceback
import HTSeq
import time
class Xcpt(Exception):
def __init__(self, msg):
self.msg = msg
def htseqMX(gff_filename,sam_filenames,colnames,sam_exts,sam_bais,opts):
"""
Code taken from count.py in Simon Anders HTSeq distribution
Wrapped in a loop to accept multiple bam/sam files and their names from galaxy to
produce a matrix of contig counts by sample for downstream use in edgeR and DESeq tools
"""
class UnknownChrom( Exception ):
pass
def my_showwarning( message, category, filename, lineno = None, line = None ):
sys.stdout.write( "Warning: %s\n" % message )
def invert_strand( iv ):
iv2 = iv.copy()
if iv2.strand == "+":
iv2.strand = "-"
elif iv2.strand == "-":
iv2.strand = "+"
else:
            raise ValueError("Illegal strand")
return iv2
def count_reads_in_features( sam_filenames, colnames, gff_filename, opts ):
""" Hacked version of htseq count.py
"""
if opts.quiet:
warnings.filterwarnings( action="ignore", module="HTSeq" )
features = HTSeq.GenomicArrayOfSets( "auto", opts.stranded != "no" )
mapqMin = int(opts.mapqMin)
counts = {}
nreads = 0
empty = 0
ambiguous = 0
notaligned = 0
lowqual = 0
nonunique = 0
filtered = 0 # new filter_extras - need a better way to do this - independent filter tool?
gff = HTSeq.GFF_Reader( gff_filename )
try:
for i,f in enumerate(gff):
if f.type == opts.feature_type:
try:
feature_id = f.attr[ opts.id_attribute ]
except KeyError:
try:
feature_id = f.attr[ 'gene_id' ]
except KeyError:
sys.exit( "Feature at row %d %s does not contain a '%s' attribute OR a gene_id attribute - faulty GFF?" %
( (i+1), f.name, opts.id_attribute ) )
if opts.stranded != "no" and f.iv.strand == ".":
sys.exit( "Feature %s at %s does not have strand information but you are "
"running htseq-count in stranded mode. Use '--stranded=no'." %
( f.name, f.iv ) )
features[ f.iv ] += feature_id
counts[ feature_id ] = [0 for x in colnames] # we use sami as an index here to bump counts later
except:
sys.stderr.write( "Error occured in %s.\n" % gff.get_line_number_string() )
raise
if not opts.quiet:
sys.stdout.write( "%d GFF lines processed.\n" % i )
if len( counts ) == 0 and not opts.quiet:
sys.stdout.write( "Warning: No features of type '%s' found.\n" % opts.feature_type )
for sami,sam_filename in enumerate(sam_filenames):
colname = colnames[sami]
isbam = sam_exts[sami] == 'bam'
hasbai = sam_bais[sami] > ''
if hasbai:
tempname = os.path.splitext(os.path.basename(sam_filename))[0]
tempbam = '%s_TEMP.bam' % tempname
tempbai = '%s_TEMP.bai' % tempname
os.link(sam_filename,tempbam)
os.link(sam_bais[sami],tempbai)
try:
if isbam:
if hasbai:
read_seq = HTSeq.BAM_Reader ( tempbam )
else:
read_seq = HTSeq.BAM_Reader( sam_filename )
else:
read_seq = HTSeq.SAM_Reader( sam_filename )
first_read = iter(read_seq).next()
pe_mode = first_read.paired_end
except:
if isbam:
print >> sys.stderr, "Error occured when reading first line of bam file %s colname=%s \n" % (sam_filename,colname )
else:
print >> sys.stderr, "Error occured when reading first line of sam file %s colname=%s \n" % (sam_filename,colname )
raise
try:
if pe_mode:
read_seq_pe_file = read_seq
read_seq = HTSeq.pair_SAM_alignments( read_seq )
for seqi,r in enumerate(read_seq):
nreads += 1
if not pe_mode:
if not r.aligned:
notaligned += 1
continue
try:
if len(opts.filter_extras) > 0:
for extra in opts.filter_extras:
if r.optional_field(extra):
filtered += 1
continue
if r.optional_field( "NH" ) > 1:
nonunique += 1
continue
except KeyError:
pass
if r.aQual < mapqMin:
lowqual += 1
continue
if opts.stranded != "reverse":
iv_seq = ( co.ref_iv for co in r.cigar if co.type == "M" and co.size > 0 )
else:
iv_seq = ( invert_strand( co.ref_iv ) for co in r.cigar if co.type == "M" and co.size > 0 )
else:
if r[0] is not None and r[0].aligned:
if opts.stranded != "reverse":
iv_seq = ( co.ref_iv for co in r[0].cigar if co.type == "M" and co.size > 0 )
else:
iv_seq = ( invert_strand( co.ref_iv ) for co in r[0].cigar if co.type == "M" and co.size > 0 )
else:
iv_seq = tuple()
if r[1] is not None and r[1].aligned:
if opts.stranded != "reverse":
iv_seq = itertools.chain( iv_seq,
( invert_strand( co.ref_iv ) for co in r[1].cigar if co.type == "M" and co.size > 0 ) )
else:
iv_seq = itertools.chain( iv_seq,
( co.ref_iv for co in r[1].cigar if co.type == "M" and co.size > 0 ) )
else:
if ( r[0] is None ) or not ( r[0].aligned ):
notaligned += 1
continue
try:
if ( r[0] is not None and r[0].optional_field( "NH" ) > 1 ) or \
( r[1] is not None and r[1].optional_field( "NH" ) > 1 ):
nonunique += 1
continue
except KeyError:
pass
if ( r[0] and r[0].aQual < mapqMin ) or ( r[1] and r[1].aQual < mapqMin ):
lowqual += 1
continue
try:
if opts.mode == "union":
fs = set()
for iv in iv_seq:
if iv.chrom not in features.chrom_vectors:
raise UnknownChrom
for iv2, fs2 in features[ iv ].steps():
fs = fs.union( fs2 )
elif opts.mode == "intersection-strict" or opts.mode == "intersection-nonempty":
fs = None
for iv in iv_seq:
if iv.chrom not in features.chrom_vectors:
raise UnknownChrom
for iv2, fs2 in features[ iv ].steps():
if len(fs2) > 0 or opts.mode == "intersection-strict":
if fs is None:
fs = fs2.copy()
else:
fs = fs.intersection( fs2 )
else:
sys.exit( "Illegal overlap mode %s" % opts.mode )
if fs is None or len( fs ) == 0:
empty += 1
elif len( fs ) > 1:
ambiguous += 1
else:
ck = list(fs)[0]
counts[ck][sami] += 1 # end up with counts for each sample as a list
except UnknownChrom:
if not pe_mode:
rr = r
else:
rr = r[0] if r[0] is not None else r[1]
empty += 1
if not opts.quiet:
sys.stdout.write( ( "Warning: Skipping read '%s', because chromosome " +
"'%s', to which it has been aligned, did not appear in the GFF file.\n" ) %
( rr.read.name, iv.chrom ) )
except:
if not pe_mode:
sys.stderr.write( "Error occured in %s.\n" % read_seq.get_line_number_string() )
else:
sys.stderr.write( "Error occured in %s.\n" % read_seq_pe_file.get_line_number_string() )
raise
if not opts.quiet:
sys.stdout.write( "%d sam %s processed for %s.\n" % ( seqi, "lines " if not pe_mode else "line pairs", colname ) )
return counts,empty,ambiguous,lowqual,notaligned,nonunique,filtered,nreads
warnings.showwarning = my_showwarning
assert os.path.isfile(gff_filename),'## unable to open supplied gff file %s' % gff_filename
try:
counts,empty,ambiguous,lowqual,notaligned,nonunique,filtered,nreads = count_reads_in_features( sam_filenames, colnames, gff_filename,opts)
except:
sys.stderr.write( "Error: %s\n" % str( sys.exc_info()[1] ) )
sys.stderr.write( "[Exception type: %s, raised in %s:%d]\n" %
( sys.exc_info()[1].__class__.__name__,
os.path.basename(traceback.extract_tb( sys.exc_info()[2] )[-1][0]),
traceback.extract_tb( sys.exc_info()[2] )[-1][1] ) )
sys.exit( 1 )
return counts,empty,ambiguous,lowqual,notaligned,nonunique,filtered,nreads
def usage():
print >> sys.stdout, """Usage: python htseqsams2mx.py -w <halfwindowsize> -g <gfffile.gff> -o <outfilename> [-i] [-c] --samf "<sam1.sam>,<sam1.column_header>" --samf "...<samN.column_header>" """
sys.exit(1)
if __name__ == "__main__":
"""
<command interpreter="python">
htseqsams2mx.py -w "$halfwin" -g "$gfffile" -o "$outfile" -m "union"
#for $s in $samfiles:
--samf "'${s.samf}','${s.samf.name}'"
#end for
</command>
"""
if len(sys.argv) < 2:
usage()
sys.exit(1)
starttime = time.time()
op = optparse.OptionParser()
# All tools
op.add_option('-w', '--halfwindow', default="0")
op.add_option('-m', '--mode', default="union")
op.add_option('-s', '--stranded', default="no")
op.add_option('-y', '--feature_type', default="exon")
op.add_option('-g', '--gff_file', default=None)
op.add_option('-o', '--outfname', default=None)
op.add_option('-f','--forceName', default="false")
op.add_option('--samf', default=[], action="append")
op.add_option('--filter_extras', default=[], action="append")
op.add_option('--mapqMin', default='0')
op.add_option( "-t", "--type", type="string", dest="featuretype",
default = "exon", help = "feature type (3rd column in GFF file) to be used, " +
"all features of other type are ignored (default, suitable for Ensembl " +
"GTF files: exon)" )
op.add_option( "-i", "--id_attribute", type="string", dest="id_attribute",
default = "gene_name", help = "GTF attribute to be used as feature ID (default, " +
"suitable for Ensembl GTF files: gene_id)" )
op.add_option( "-q", "--quiet", action="store_true", dest="quiet", default = False,
help = "suppress progress report and warnings" )
opts, args = op.parse_args()
halfwindow = int(opts.halfwindow)
gff_file = opts.gff_file
assert os.path.isfile(gff_file),'##ERROR htseqsams2mx: Supplied input GFF file "%s" not found' % gff_file
outfname = opts.outfname
sam_filenames = []
colnames = []
samf = opts.samf
samfsplit = [x.split(',') for x in samf] # one per samf set
samsets = []
for samfs in samfsplit:
samset = [x.replace("'","") for x in samfs]
samset = [x.replace('"','') for x in samset]
samsets.append(samset)
samsets = [x for x in samsets if x[0].lower() != 'none']
# just cannot stop getting these on cl! wtf in cheetah for a repeat group?
samfnames = [x[0] for x in samsets]
if len(set(samfnames)) != len(samfnames):
samnames = []
delme = []
for i,s in enumerate(samfnames):
if s in samnames:
delme.append(i)
                print >> sys.stdout, '## WARNING htseqsams2mx: Duplicate input sam file %s in %s - ignoring dupe in 0 based position %s' %\
                    (s,','.join(samfnames), str(delme))
else:
samnames.append(s) # first time
samsets = [x for i,x in enumerate(samsets) if not (i in delme)]
samfnames = [x[0] for x in samsets]
scolnames = [x[1]for x in samsets]
assert len(samfnames) == len(scolnames), '##ERROR sams2mx: Count of sam/cname not consistent - %d/%d' % (len(samfnames),len(scolnames))
sam_exts = [x[2] for x in samsets]
assert len(samfnames) == len(sam_exts), '##ERROR sams2mx: Count of extensions not consistent - %d/%d' % (len(samfnames),len(sam_exts))
sam_bais = [x[3] for x in samsets] # these only exist for bams and need to be finessed with a symlink so pysam will just work
for i,b in enumerate(samfnames):
assert os.path.isfile(b),'## Supplied input sam file "%s" not found' % b
sam_filenames.append(b)
sampName = scolnames[i] # better be unique
sampName = sampName.replace('#','') # for R
sampName = sampName.replace('(','') # for R
sampName = sampName.replace(')','') # for R
sampName = sampName.replace(' ','_') # for R
colnames.append(sampName)
counts,empty,ambiguous,lowqual,notaligned,nonunique,filtered,nreads = htseqMX(gff_file, sam_filenames,colnames,sam_exts,sam_bais,opts)
heads = '\t'.join(['Contig',] + colnames)
res = [heads,]
contigs = counts.keys()
contigs.sort()
totalc = 0
emptycontigs = 0
for contig in contigs:
thisc = sum(counts[contig])
if thisc > 0: # no output for empty contigs
totalc += thisc
crow = [contig,] + ['%d' % x for x in counts[contig]]
res.append('\t'.join(crow))
else:
emptycontigs += 1
outf = open(opts.outfname,'w')
outf.write('\n'.join(res))
outf.write('\n')
outf.close()
walltime = int(time.time() - starttime)
accumulatornames = ('walltime (seconds)','total reads read','total reads counted','number of contigs','total empty reads','total ambiguous reads','total low quality reads',
'total not aligned reads','total not unique mapping reads','extra filtered reads','empty contigs')
accums = (walltime,nreads,totalc,len(contigs),empty,ambiguous,lowqual,notaligned,nonunique,filtered,emptycontigs)
fracs = (1.0,1.0,float(totalc)/nreads,1.0,float(empty)/nreads,float(ambiguous)/nreads,float(lowqual)/nreads,float(notaligned)/nreads,float(nonunique)/nreads,float(filtered)/nreads,float(emptycontigs)/len(contigs))
notes = ['%s = %d (%2.3f)' % (accumulatornames[i],x,100.0*fracs[i]) for i,x in enumerate(accums)]
print >> sys.stdout, '\n'.join(notes)
sys.exit(0)
|
akarambir/twython | refs/heads/master | examples/get_user_timeline.py | 9 | from twython import Twython, TwythonError
# Requires Authentication as of Twitter API v1.1
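# APP_KEY, APP_SECRET, OAUTH_TOKEN and OAUTH_TOKEN_SECRET are placeholders for
# your own application credentials; define them before constructing Twython, e.g.
#   APP_KEY, APP_SECRET = 'xxxx', 'xxxx'
#   OAUTH_TOKEN, OAUTH_TOKEN_SECRET = 'xxxx', 'xxxx'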
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
try:
user_timeline = twitter.get_user_timeline(screen_name='ryanmcgrath')
except TwythonError as e:
print e
print user_timeline
|
bobcyw/django | refs/heads/master | tests/apps/default_config_app/apps.py | 553 | from django.apps import AppConfig
class CustomConfig(AppConfig):
name = 'apps.default_config_app'
|
YoshikawaMasashi/magenta | refs/heads/master | magenta/models/rl_tuner/rl_tuner.py | 2 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the main RL Tuner class.
RL Tuner is a Deep Q Network (DQN) with augmented reward to create melodies
by using reinforcement learning to fine-tune a trained Note RNN according
to some music theory rewards.
Also implements two alternatives to Q learning: Psi and G learning. The
algorithm can be switched using the 'algorithm' hyperparameter.
For more information, please consult the README.md file in this directory.
"""
from collections import deque
import os
from os import makedirs
from os.path import exists
import random
import urllib
# internal imports
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import logsumexp
import tensorflow as tf
from magenta.models.rl_tuner import note_rnn_loader
from magenta.models.rl_tuner import rl_tuner_eval_metrics
from magenta.models.rl_tuner import rl_tuner_ops
from magenta.music import melodies_lib as mlib
from magenta.music import midi_io
# Note values of special actions.
NOTE_OFF = 0
NO_EVENT = 1
# Training data sequences are limited to this length, so the padding queue pads
# to this length.
TRAIN_SEQUENCE_LENGTH = 192
def reload_files():
"""Used to reload the imported dependency files (needed for ipynb notebooks).
"""
reload(note_rnn_loader)
reload(rl_tuner_ops)
reload(rl_tuner_eval_metrics)
class RLTuner(object):
"""Implements a recurrent DQN designed to produce melody sequences."""
def __init__(self, output_dir,
# Hyperparameters
dqn_hparams=None,
reward_mode='music_theory_all',
reward_scaler=1.0,
exploration_mode='egreedy',
priming_mode='random_note',
stochastic_observations=False,
algorithm='q',
# Trained Note RNN to load and tune
note_rnn_checkpoint_dir=None,
note_rnn_checkpoint_file=None,
note_rnn_type='default',
note_rnn_hparams=None,
# Other music related settings.
num_notes_in_melody=32,
input_size=rl_tuner_ops.NUM_CLASSES,
num_actions=rl_tuner_ops.NUM_CLASSES,
midi_primer=None,
# Logistics.
save_name='rl_tuner.ckpt',
output_every_nth=1000,
training_file_list=None,
summary_writer=None,
initialize_immediately=True):
"""Initializes the MelodyQNetwork class.
Args:
output_dir: Where the model will save its compositions (midi files).
dqn_hparams: A tf_lib.hparams() object containing the hyperparameters of
the DQN algorithm, including minibatch size, exploration probability,
etc.
reward_mode: Controls which reward function can be applied. There are
several, including 'scale', which teaches the model to play a scale,
and of course 'music_theory_all', which is a music-theory-based reward
function composed of other functions.
reward_scaler: Controls the emphasis placed on the music theory rewards.
This value is the inverse of 'c' in the academic paper.
exploration_mode: can be 'egreedy' which is an epsilon greedy policy, or
it can be 'boltzmann', in which the model will sample from its output
distribution to choose the next action.
priming_mode: Each time the model begins a new composition, it is primed
with either a random note ('random_note'), a random MIDI file from the
training data ('random_midi'), or a particular MIDI file
('single_midi').
stochastic_observations: If False, the note that the model chooses to
play next (the argmax of its softmax probabilities) deterministically
becomes the next note it will observe. If True, the next observation
will be sampled from the model's softmax output.
algorithm: can be 'default', 'psi', 'g' or 'pure_rl', for different
learning algorithms
note_rnn_checkpoint_dir: The directory from which the internal
NoteRNNLoader will load its checkpointed LSTM.
note_rnn_checkpoint_file: A checkpoint file to use in case one cannot be
found in the note_rnn_checkpoint_dir.
note_rnn_type: If 'default', will use the basic LSTM described in the
research paper. If 'basic_rnn', will assume the checkpoint is from a
Magenta basic_rnn model.
note_rnn_hparams: A tf.HParams object which defines the hyper parameters
used to train the MelodyRNN model that will be loaded from a checkpoint.
num_notes_in_melody: The length of a composition of the model
input_size: the size of the one-hot vector encoding a note that is input
to the model.
num_actions: The size of the one-hot vector encoding a note that is
output by the model.
midi_primer: A midi file that can be used to prime the model if
priming_mode is set to 'single_midi'.
save_name: Name the model will use to save checkpoints.
output_every_nth: How many training steps before the model will print
an output saying the cumulative reward, and save a checkpoint.
training_file_list: A list of paths to tfrecord files containing melody
training data. This is necessary to use the 'random_midi' priming mode.
summary_writer: A tf.summary.FileWriter used to log metrics.
initialize_immediately: if True, the class will instantiate its component
MelodyRNN networks and build the graph in the constructor.
"""
# Make graph.
self.graph = tf.Graph()
with self.graph.as_default():
# Memorize arguments.
self.input_size = input_size
self.num_actions = num_actions
self.output_every_nth = output_every_nth
self.output_dir = output_dir
self.save_path = os.path.join(output_dir, save_name)
self.reward_scaler = reward_scaler
self.reward_mode = reward_mode
self.exploration_mode = exploration_mode
self.num_notes_in_melody = num_notes_in_melody
self.stochastic_observations = stochastic_observations
self.algorithm = algorithm
self.priming_mode = priming_mode
self.midi_primer = midi_primer
self.training_file_list = training_file_list
self.note_rnn_checkpoint_dir = note_rnn_checkpoint_dir
self.note_rnn_checkpoint_file = note_rnn_checkpoint_file
self.note_rnn_hparams = note_rnn_hparams
self.note_rnn_type = note_rnn_type
if priming_mode == 'single_midi' and midi_primer is None:
        tf.logging.fatal('A midi primer file is required when using '
                         'the single_midi priming mode.')
if note_rnn_checkpoint_dir is None or not note_rnn_checkpoint_dir:
print 'Retrieving checkpoint of Note RNN from Magenta download server.'
urllib.urlretrieve(
'http://download.magenta.tensorflow.org/models/'
'rl_tuner_note_rnn.ckpt', 'note_rnn.ckpt')
self.note_rnn_checkpoint_dir = os.getcwd()
self.note_rnn_checkpoint_file = os.path.join(os.getcwd(),
'note_rnn.ckpt')
if self.note_rnn_hparams is None:
if self.note_rnn_type == 'basic_rnn':
self.note_rnn_hparams = rl_tuner_ops.basic_rnn_hparams()
else:
self.note_rnn_hparams = rl_tuner_ops.default_hparams()
if self.algorithm == 'g' or self.algorithm == 'pure_rl':
self.reward_mode = 'music_theory_only'
if dqn_hparams is None:
self.dqn_hparams = rl_tuner_ops.default_dqn_hparams()
else:
self.dqn_hparams = dqn_hparams
self.discount_rate = tf.constant(self.dqn_hparams.discount_rate)
self.target_network_update_rate = tf.constant(
self.dqn_hparams.target_network_update_rate)
self.optimizer = tf.train.AdamOptimizer()
# DQN state.
self.actions_executed_so_far = 0
self.experience = deque(maxlen=self.dqn_hparams.max_experience)
self.iteration = 0
self.summary_writer = summary_writer
self.num_times_store_called = 0
self.num_times_train_called = 0
# Stored reward metrics.
self.reward_last_n = 0
self.rewards_batched = []
self.music_theory_reward_last_n = 0
self.music_theory_rewards_batched = []
self.note_rnn_reward_last_n = 0
self.note_rnn_rewards_batched = []
self.eval_avg_reward = []
self.eval_avg_music_theory_reward = []
self.eval_avg_note_rnn_reward = []
self.target_val_list = []
# Variables to keep track of characteristics of the current composition
# TODO(natashajaques): Implement composition as a class to obtain data
# encapsulation so that you can't accidentally change the leap direction.
self.beat = 0
self.composition = []
self.composition_direction = 0
self.leapt_from = None # stores the note at which composition leapt
self.steps_since_last_leap = 0
if not exists(self.output_dir):
makedirs(self.output_dir)
if initialize_immediately:
self.initialize_internal_models_graph_session()
def initialize_internal_models_graph_session(self,
restore_from_checkpoint=True):
"""Initializes internal RNN models, builds the graph, starts the session.
Adds the graphs of the internal RNN models to this graph, adds the DQN ops
to the graph, and starts a new Saver and session. By having a separate
function for this rather than doing it in the constructor, it allows a model
inheriting from this class to define its q_network differently.
Args:
restore_from_checkpoint: If True, the weights for the 'q_network',
'target_q_network', and 'reward_rnn' will be loaded from a checkpoint.
If false, these models will be initialized with random weights. Useful
for checking what pure RL (with no influence from training data) sounds
like.
"""
with self.graph.as_default():
# Add internal networks to the graph.
tf.logging.info('Initializing q network')
self.q_network = note_rnn_loader.NoteRNNLoader(
self.graph, 'q_network',
self.note_rnn_checkpoint_dir,
midi_primer=self.midi_primer,
training_file_list=self.training_file_list,
checkpoint_file=self.note_rnn_checkpoint_file,
hparams=self.note_rnn_hparams,
note_rnn_type=self.note_rnn_type)
tf.logging.info('Initializing target q network')
self.target_q_network = note_rnn_loader.NoteRNNLoader(
self.graph,
'target_q_network',
self.note_rnn_checkpoint_dir,
midi_primer=self.midi_primer,
training_file_list=self.training_file_list,
checkpoint_file=self.note_rnn_checkpoint_file,
hparams=self.note_rnn_hparams,
note_rnn_type=self.note_rnn_type)
tf.logging.info('Initializing reward network')
self.reward_rnn = note_rnn_loader.NoteRNNLoader(
self.graph, 'reward_rnn',
self.note_rnn_checkpoint_dir,
midi_primer=self.midi_primer,
training_file_list=self.training_file_list,
checkpoint_file=self.note_rnn_checkpoint_file,
hparams=self.note_rnn_hparams,
note_rnn_type=self.note_rnn_type)
tf.logging.info('Q network cell: %s', self.q_network.cell)
# Add rest of variables to graph.
tf.logging.info('Adding RL graph variables')
self.build_graph()
# Prepare saver and session.
self.saver = tf.train.Saver()
self.session = tf.Session(graph=self.graph)
self.session.run(tf.global_variables_initializer())
# Initialize internal networks.
if restore_from_checkpoint:
self.q_network.initialize_and_restore(self.session)
self.target_q_network.initialize_and_restore(self.session)
self.reward_rnn.initialize_and_restore(self.session)
# Double check that the model was initialized from checkpoint properly.
reward_vars = self.reward_rnn.variables()
q_vars = self.q_network.variables()
reward1 = self.session.run(reward_vars[0])
q1 = self.session.run(q_vars[0])
if np.sum((q1 - reward1)**2) == 0.0:
# TODO(natashamjaques): Remove print statement once tf.logging outputs
# to Jupyter notebooks (once the following issue is resolved:
# https://github.com/tensorflow/tensorflow/issues/3047)
print '\nSuccessfully initialized internal nets from checkpoint!'
tf.logging.info('\nSuccessfully initialized internal nets from '
'checkpoint!')
else:
tf.logging.fatal('Error! The model was not initialized from '
'checkpoint properly')
else:
self.q_network.initialize_new(self.session)
self.target_q_network.initialize_new(self.session)
self.reward_rnn.initialize_new(self.session)
if self.priming_mode == 'random_midi':
tf.logging.info('Getting priming melodies')
self.get_priming_melodies()
def get_priming_melodies(self):
"""Runs a batch of training data through MelodyRNN model.
If the priming mode is 'random_midi', priming the q-network requires a
random training melody. Therefore this function runs a batch of data from
the training directory through the internal model, and the resulting
internal states of the LSTM are stored in a list. The next note in each
training melody is also stored in a corresponding list called
'priming_notes'. Therefore, to prime the model with a random melody, it is
only necessary to select a random index from 0 to batch_size-1 and use the
hidden states and note at that index as input to the model.
"""
(next_note_softmax,
self.priming_states, lengths) = self.q_network.run_training_batch()
# Get the next note that was predicted for each priming melody to be used
# in priming.
self.priming_notes = [0] * len(lengths)
for i in range(len(lengths)):
# Each melody has TRAIN_SEQUENCE_LENGTH outputs, but the last note is
# actually stored at lengths[i]. The rest is padding.
start_i = i * TRAIN_SEQUENCE_LENGTH
end_i = start_i + lengths[i] - 1
end_softmax = next_note_softmax[end_i, :]
self.priming_notes[i] = np.argmax(end_softmax)
tf.logging.info('Stored priming notes: %s', self.priming_notes)
def prime_internal_model(self, model):
"""Prime an internal model such as the q_network based on priming mode.
Args:
model: The internal model that should be primed.
Returns:
The first observation to feed into the model.
"""
model.state_value = model.get_zero_state()
if self.priming_mode == 'random_midi':
priming_idx = np.random.randint(0, len(self.priming_states))
model.state_value = np.reshape(
self.priming_states[priming_idx, :],
(1, model.cell.state_size))
priming_note = self.priming_notes[priming_idx]
next_obs = np.array(
rl_tuner_ops.make_onehot([priming_note], self.num_actions)).flatten()
tf.logging.debug(
'Feeding priming state for midi file %s and corresponding note %s',
priming_idx, priming_note)
elif self.priming_mode == 'single_midi':
model.prime_model()
next_obs = model.priming_note
elif self.priming_mode == 'random_note':
next_obs = self.get_random_note()
else:
tf.logging.warn('Error! Invalid priming mode. Priming with random note')
next_obs = self.get_random_note()
return next_obs
def get_random_note(self):
"""Samle a note uniformly at random.
Returns:
random note
"""
note_idx = np.random.randint(0, self.num_actions - 1)
return np.array(rl_tuner_ops.make_onehot([note_idx],
self.num_actions)).flatten()
def reset_composition(self):
"""Starts the models internal composition over at beat 0, with no notes.
Also resets statistics about whether the composition is in the middle of a
melodic leap.
"""
self.beat = 0
self.composition = []
self.composition_direction = 0
self.leapt_from = None
self.steps_since_last_leap = 0
def build_graph(self):
"""Builds the reinforcement learning tensorflow graph."""
tf.logging.info('Adding reward computation portion of the graph')
with tf.name_scope('reward_computation'):
self.reward_scores = tf.identity(self.reward_rnn(), name='reward_scores')
tf.logging.info('Adding taking action portion of graph')
with tf.name_scope('taking_action'):
# Output of the q network gives the value of taking each action (playing
# each note).
self.action_scores = tf.identity(self.q_network(), name='action_scores')
tf.summary.histogram(
'action_scores', self.action_scores)
# The action values for the G algorithm are computed differently.
if self.algorithm == 'g':
self.g_action_scores = self.action_scores + self.reward_scores
# Compute predicted action, which is the argmax of the action scores.
self.action_softmax = tf.nn.softmax(self.g_action_scores,
name='action_softmax')
self.predicted_actions = tf.one_hot(tf.argmax(self.g_action_scores,
dimension=1,
name='predicted_actions'),
self.num_actions)
else:
# Compute predicted action, which is the argmax of the action scores.
self.action_softmax = tf.nn.softmax(self.action_scores,
name='action_softmax')
self.predicted_actions = tf.one_hot(tf.argmax(self.action_scores,
dimension=1,
name='predicted_actions'),
self.num_actions)
tf.logging.info('Add estimating future rewards portion of graph')
with tf.name_scope('estimating_future_rewards'):
# The target q network is used to estimate the value of the best action at
# the state resulting from the current action.
self.next_action_scores = tf.stop_gradient(self.target_q_network())
tf.summary.histogram(
'target_action_scores', self.next_action_scores)
# Rewards are observed from the environment and are fed in later.
self.rewards = tf.placeholder(tf.float32, (None,), name='rewards')
# Each algorithm is attempting to model future rewards with a different
# function.
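      # Roughly, per sample (a sketch of the ops below):
      #   q:   target_val = max_a' Q(s', a')
      #   psi: target_val = logsumexp_a' psi(s', a')
      #   g:   target_val = logsumexp_a' [G(s', a') + r(s', a') - logsumexp_a'' r(s', a'')]
      # and future_rewards = rewards + discount_rate * target_val.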
if self.algorithm == 'psi':
self.target_vals = tf.reduce_logsumexp(self.next_action_scores,
reduction_indices=[1,])
elif self.algorithm == 'g':
self.g_normalizer = tf.reduce_logsumexp(self.reward_scores,
reduction_indices=[1,])
self.g_normalizer = tf.reshape(self.g_normalizer, [-1, 1])
self.g_normalizer = tf.tile(self.g_normalizer, [1, self.num_actions])
self.g_action_scores = tf.subtract(
(self.next_action_scores + self.reward_scores), self.g_normalizer)
self.target_vals = tf.reduce_logsumexp(self.g_action_scores,
reduction_indices=[1,])
else:
# Use default based on Q learning.
self.target_vals = tf.reduce_max(self.next_action_scores,
reduction_indices=[1,])
# Total rewards are the observed rewards plus discounted estimated future
# rewards.
self.future_rewards = self.rewards + self.discount_rate * self.target_vals
tf.logging.info('Adding q value prediction portion of graph')
with tf.name_scope('q_value_prediction'):
# Action mask will be a one-hot encoding of the action the network
# actually took.
self.action_mask = tf.placeholder(tf.float32, (None, self.num_actions),
name='action_mask')
self.masked_action_scores = tf.reduce_sum(self.action_scores *
self.action_mask,
reduction_indices=[1,])
temp_diff = self.masked_action_scores - self.future_rewards
# Prediction error is the mean squared error between the reward the
# network actually received for a given action, and what it expected to
# receive.
self.prediction_error = tf.reduce_mean(tf.square(temp_diff))
# Compute gradients.
self.params = tf.trainable_variables()
self.gradients = self.optimizer.compute_gradients(self.prediction_error)
# Clip gradients.
for i, (grad, var) in enumerate(self.gradients):
if grad is not None:
self.gradients[i] = (tf.clip_by_norm(grad, 5), var)
for grad, var in self.gradients:
tf.summary.histogram(var.name, var)
if grad is not None:
tf.summary.histogram(var.name + '/gradients', grad)
# Backprop.
self.train_op = self.optimizer.apply_gradients(self.gradients)
tf.logging.info('Adding target network update portion of graph')
with tf.name_scope('target_network_update'):
# Updates the target_q_network to be similar to the q_network based on
# the target_network_update_rate.
self.target_network_update = []
for v_source, v_target in zip(self.q_network.variables(),
self.target_q_network.variables()):
# Equivalent to target = (1-alpha) * target + alpha * source
update_op = v_target.assign_sub(self.target_network_update_rate *
(v_target - v_source))
self.target_network_update.append(update_op)
self.target_network_update = tf.group(*self.target_network_update)
tf.summary.scalar(
'prediction_error', self.prediction_error)
self.summarize = tf.summary.merge_all()
self.no_op1 = tf.no_op()
def train(self, num_steps=10000, exploration_period=5000, enable_random=True):
"""Main training function that allows model to act, collects reward, trains.
Iterates a number of times, getting the model to act each time, saving the
experience, and performing backprop.
Args:
num_steps: The number of training steps to execute.
exploration_period: The number of steps over which the probability of
exploring (taking a random action) is annealed from 1.0 to the model's
random_action_probability.
enable_random: If False, the model will not be able to act randomly /
explore.
"""
tf.logging.info('Evaluating initial model...')
self.evaluate_model()
self.actions_executed_so_far = 0
if self.stochastic_observations:
tf.logging.info('Using stochastic environment')
sample_next_obs = False
if self.exploration_mode == 'boltzmann' or self.stochastic_observations:
sample_next_obs = True
self.reset_composition()
last_observation = self.prime_internal_models()
for i in range(num_steps):
# Experiencing observation, state, action, reward, new observation,
# new state tuples, and storing them.
state = np.array(self.q_network.state_value).flatten()
action, new_observation, reward_scores = self.action(
last_observation, exploration_period, enable_random=enable_random,
sample_next_obs=sample_next_obs)
new_state = np.array(self.q_network.state_value).flatten()
new_reward_state = np.array(self.reward_rnn.state_value).flatten()
reward = self.collect_reward(last_observation, new_observation,
reward_scores)
self.store(last_observation, state, action, reward, new_observation,
new_state, new_reward_state)
# Used to keep track of how the reward is changing over time.
self.reward_last_n += reward
# Used to keep track of the current musical composition and beat for
# the reward functions.
self.composition.append(np.argmax(new_observation))
self.beat += 1
if i > 0 and i % self.output_every_nth == 0:
tf.logging.info('Evaluating model...')
self.evaluate_model()
self.save_model(self.algorithm)
if self.algorithm == 'g':
self.rewards_batched.append(
self.music_theory_reward_last_n + self.note_rnn_reward_last_n)
else:
self.rewards_batched.append(self.reward_last_n)
self.music_theory_rewards_batched.append(
self.music_theory_reward_last_n)
self.note_rnn_rewards_batched.append(self.note_rnn_reward_last_n)
# Save a checkpoint.
save_step = len(self.rewards_batched)*self.output_every_nth
self.saver.save(self.session, self.save_path, global_step=save_step)
r = self.reward_last_n
tf.logging.info('Training iteration %s', i)
tf.logging.info('\tReward for last %s steps: %s',
self.output_every_nth, r)
tf.logging.info('\t\tMusic theory reward: %s',
self.music_theory_reward_last_n)
tf.logging.info('\t\tNote RNN reward: %s', self.note_rnn_reward_last_n)
# TODO(natashamjaques): Remove print statement once tf.logging outputs
# to Jupyter notebooks (once the following issue is resolved:
# https://github.com/tensorflow/tensorflow/issues/3047)
print 'Training iteration', i
print '\tReward for last', self.output_every_nth, 'steps:', r
print '\t\tMusic theory reward:', self.music_theory_reward_last_n
print '\t\tNote RNN reward:', self.note_rnn_reward_last_n
if self.exploration_mode == 'egreedy':
exploration_p = rl_tuner_ops.linear_annealing(
self.actions_executed_so_far, exploration_period, 1.0,
self.dqn_hparams.random_action_probability)
tf.logging.info('\tExploration probability is %s', exploration_p)
self.reward_last_n = 0
self.music_theory_reward_last_n = 0
self.note_rnn_reward_last_n = 0
# Backprop.
self.training_step()
# Update current state as last state.
last_observation = new_observation
# Reset the state after each composition is complete.
if self.beat % self.num_notes_in_melody == 0:
tf.logging.debug('\nResetting composition!\n')
self.reset_composition()
last_observation = self.prime_internal_models()
def action(self, observation, exploration_period=0, enable_random=True,
sample_next_obs=False):
"""Given an observation, runs the q_network to choose the current action.
Does not backprop.
Args:
observation: A one-hot encoding of a single observation (note).
exploration_period: The total length of the period the network will
spend exploring, as set in the train function.
enable_random: If False, the network cannot act randomly.
sample_next_obs: If True, the next observation will be sampled from
the softmax probabilities produced by the model, and passed back
along with the action. If False, only the action is passed back.
Returns:
      The action chosen, the next observation, and the reward_scores returned
      by the reward_rnn. If sample_next_obs is False, the next observation is
      equal to the action.
"""
assert len(observation.shape) == 1, 'Single observation only'
self.actions_executed_so_far += 1
if self.exploration_mode == 'egreedy':
# Compute the exploration probability.
exploration_p = rl_tuner_ops.linear_annealing(
self.actions_executed_so_far, exploration_period, 1.0,
self.dqn_hparams.random_action_probability)
elif self.exploration_mode == 'boltzmann':
enable_random = False
sample_next_obs = True
# Run the observation through the q_network.
input_batch = np.reshape(observation,
(self.q_network.batch_size, 1, self.input_size))
lengths = np.full(self.q_network.batch_size, 1, dtype=int)
(action, action_softmax, self.q_network.state_value,
reward_scores, self.reward_rnn.state_value) = self.session.run(
[self.predicted_actions, self.action_softmax,
self.q_network.state_tensor, self.reward_scores,
self.reward_rnn.state_tensor],
{self.q_network.melody_sequence: input_batch,
self.q_network.initial_state: self.q_network.state_value,
self.q_network.lengths: lengths,
self.reward_rnn.melody_sequence: input_batch,
self.reward_rnn.initial_state: self.reward_rnn.state_value,
self.reward_rnn.lengths: lengths})
reward_scores = np.reshape(reward_scores, (self.num_actions))
action_softmax = np.reshape(action_softmax, (self.num_actions))
action = np.reshape(action, (self.num_actions))
if enable_random and random.random() < exploration_p:
note = self.get_random_note()
return note, note, reward_scores
else:
if not sample_next_obs:
return action, action, reward_scores
else:
obs_note = rl_tuner_ops.sample_softmax(action_softmax)
next_obs = np.array(
rl_tuner_ops.make_onehot([obs_note], self.num_actions)).flatten()
return action, next_obs, reward_scores
def store(self, observation, state, action, reward, newobservation, newstate,
new_reward_state):
"""Stores an experience in the model's experience replay buffer.
One experience consists of an initial observation and internal LSTM state,
which led to the execution of an action, the receipt of a reward, and
finally a new observation and a new LSTM internal state.
Args:
observation: A one hot encoding of an observed note.
state: The internal state of the q_network MelodyRNN LSTM model.
action: A one hot encoding of action taken by network.
reward: Reward received for taking the action.
newobservation: The next observation that resulted from the action.
Unless stochastic_observations is True, the action and new
observation will be the same.
newstate: The internal state of the q_network MelodyRNN that is
observed after taking the action.
new_reward_state: The internal state of the reward_rnn network that is
observed after taking the action
"""
if self.num_times_store_called % self.dqn_hparams.store_every_nth == 0:
self.experience.append((observation, state, action, reward,
newobservation, newstate, new_reward_state))
self.num_times_store_called += 1
def training_step(self):
"""Backpropagate prediction error from a randomly sampled experience batch.
A minibatch of experiences is randomly sampled from the model's experience
replay buffer and used to update the weights of the q_network and
target_q_network.
"""
if self.num_times_train_called % self.dqn_hparams.train_every_nth == 0:
if len(self.experience) < self.dqn_hparams.minibatch_size:
return
# Sample experience.
samples = random.sample(range(len(self.experience)),
self.dqn_hparams.minibatch_size)
samples = [self.experience[i] for i in samples]
# Batch states.
states = np.empty((len(samples), self.q_network.cell.state_size))
new_states = np.empty((len(samples),
self.target_q_network.cell.state_size))
reward_new_states = np.empty((len(samples),
self.reward_rnn.cell.state_size))
observations = np.empty((len(samples), self.input_size))
new_observations = np.empty((len(samples), self.input_size))
action_mask = np.zeros((len(samples), self.num_actions))
rewards = np.empty((len(samples),))
lengths = np.full(len(samples), 1, dtype=int)
for i, (o, s, a, r, new_o, new_s, reward_s) in enumerate(samples):
observations[i, :] = o
new_observations[i, :] = new_o
states[i, :] = s
new_states[i, :] = new_s
action_mask[i, :] = a
rewards[i] = r
reward_new_states[i, :] = reward_s
observations = np.reshape(observations,
(len(samples), 1, self.input_size))
new_observations = np.reshape(new_observations,
(len(samples), 1, self.input_size))
calc_summaries = self.iteration % 100 == 0
calc_summaries = calc_summaries and self.summary_writer is not None
if self.algorithm == 'g':
_, _, target_vals, summary_str = self.session.run([
self.prediction_error,
self.train_op,
self.target_vals,
self.summarize if calc_summaries else self.no_op1,
], {
self.reward_rnn.melody_sequence: new_observations,
self.reward_rnn.initial_state: reward_new_states,
self.reward_rnn.lengths: lengths,
self.q_network.melody_sequence: observations,
self.q_network.initial_state: states,
self.q_network.lengths: lengths,
self.target_q_network.melody_sequence: new_observations,
self.target_q_network.initial_state: new_states,
self.target_q_network.lengths: lengths,
self.action_mask: action_mask,
self.rewards: rewards,
})
else:
_, _, target_vals, summary_str = self.session.run([
self.prediction_error,
self.train_op,
self.target_vals,
self.summarize if calc_summaries else self.no_op1,
], {
self.q_network.melody_sequence: observations,
self.q_network.initial_state: states,
self.q_network.lengths: lengths,
self.target_q_network.melody_sequence: new_observations,
self.target_q_network.initial_state: new_states,
self.target_q_network.lengths: lengths,
self.action_mask: action_mask,
self.rewards: rewards,
})
total_logs = (self.iteration * self.dqn_hparams.train_every_nth)
if total_logs % self.output_every_nth == 0:
self.target_val_list.append(np.mean(target_vals))
self.session.run(self.target_network_update)
if calc_summaries:
self.summary_writer.add_summary(summary_str, self.iteration)
self.iteration += 1
self.num_times_train_called += 1
def evaluate_model(self, num_trials=100, sample_next_obs=True):
"""Used to evaluate the rewards the model receives without exploring.
Generates num_trials compositions and computes the note_rnn and music
theory rewards. Uses no exploration so rewards directly relate to the
model's policy. Stores result in internal variables.
Args:
num_trials: The number of compositions to use for evaluation.
sample_next_obs: If True, the next note the model plays will be
sampled from its output distribution. If False, the model will
deterministically choose the note with maximum value.
"""
note_rnn_rewards = [0] * num_trials
music_theory_rewards = [0] * num_trials
total_rewards = [0] * num_trials
for t in range(num_trials):
last_observation = self.prime_internal_models()
self.reset_composition()
for _ in range(self.num_notes_in_melody):
_, new_observation, reward_scores = self.action(
last_observation,
0,
enable_random=False,
sample_next_obs=sample_next_obs)
note_rnn_reward = self.reward_from_reward_rnn_scores(new_observation,
reward_scores)
music_theory_reward = self.reward_music_theory(new_observation)
adjusted_mt_reward = self.reward_scaler * music_theory_reward
total_reward = note_rnn_reward + adjusted_mt_reward
note_rnn_rewards[t] = note_rnn_reward
music_theory_rewards[t] = music_theory_reward * self.reward_scaler
total_rewards[t] = total_reward
self.composition.append(np.argmax(new_observation))
self.beat += 1
last_observation = new_observation
self.eval_avg_reward.append(np.mean(total_rewards))
self.eval_avg_note_rnn_reward.append(np.mean(note_rnn_rewards))
self.eval_avg_music_theory_reward.append(np.mean(music_theory_rewards))
def collect_reward(self, obs, action, reward_scores):
"""Calls whatever reward function is indicated in the reward_mode field.
New reward functions can be written and called from here. Note that the
reward functions can make use of the musical composition that has been
played so far, which is stored in self.composition. Some reward functions
are made up of many smaller functions, such as those related to music
theory.
Args:
obs: A one-hot encoding of the observed note.
action: A one-hot encoding of the chosen action.
reward_scores: The value for each note output by the reward_rnn.
Returns:
Float reward value.
"""
# Gets and saves log p(a|s) as output by reward_rnn.
note_rnn_reward = self.reward_from_reward_rnn_scores(action, reward_scores)
self.note_rnn_reward_last_n += note_rnn_reward
if self.reward_mode == 'scale':
# Makes the model play a scale (defaults to c major).
reward = self.reward_scale(obs, action)
elif self.reward_mode == 'key':
# Makes the model play within a key.
reward = self.reward_key_distribute_prob(action)
elif self.reward_mode == 'key_and_tonic':
# Makes the model play within a key, while starting and ending on the
# tonic note.
reward = self.reward_key(action)
reward += self.reward_tonic(action)
elif self.reward_mode == 'non_repeating':
# The model can play any composition it wants, but receives a large
# negative reward for playing the same note repeatedly.
reward = self.reward_non_repeating(action)
elif self.reward_mode == 'music_theory_random':
# The model receives reward for playing in key, playing tonic notes,
# and not playing repeated notes. However the rewards it receives are
# uniformly distributed over all notes that do not violate these rules.
reward = self.reward_key(action)
reward += self.reward_tonic(action)
reward += self.reward_penalize_repeating(action)
elif self.reward_mode == 'music_theory_basic':
# As above, the model receives reward for playing in key, tonic notes
# at the appropriate times, and not playing repeated notes. However, the
# rewards it receives are based on the note probabilities learned from
# data in the original model.
reward = self.reward_key(action)
reward += self.reward_tonic(action)
reward += self.reward_penalize_repeating(action)
return reward * self.reward_scaler + note_rnn_reward
elif self.reward_mode == 'music_theory_basic_plus_variety':
# Uses the same reward function as above, but adds a penalty for
# compositions with a high autocorrelation (aka those that don't have
# sufficient variety).
reward = self.reward_key(action)
reward += self.reward_tonic(action)
reward += self.reward_penalize_repeating(action)
reward += self.reward_penalize_autocorrelation(action)
return reward * self.reward_scaler + note_rnn_reward
elif self.reward_mode == 'preferred_intervals':
reward = self.reward_preferred_intervals(action)
elif self.reward_mode == 'music_theory_all':
tf.logging.debug('Note RNN reward: %s', note_rnn_reward)
reward = self.reward_music_theory(action)
tf.logging.debug('Total music theory reward: %s',
self.reward_scaler * reward)
tf.logging.debug('Total note rnn reward: %s', note_rnn_reward)
self.music_theory_reward_last_n += reward * self.reward_scaler
return reward * self.reward_scaler + note_rnn_reward
elif self.reward_mode == 'music_theory_only':
reward = self.reward_music_theory(action)
else:
tf.logging.fatal('ERROR! Not a valid reward mode. Cannot compute reward')
self.music_theory_reward_last_n += reward * self.reward_scaler
return reward * self.reward_scaler
def reward_from_reward_rnn_scores(self, action, reward_scores):
"""Rewards based on probabilities learned from data by trained RNN.
Computes the reward_network's learned softmax probabilities. When used as
rewards, allows the model to maintain information it learned from data.
Args:
action: A one-hot encoding of the chosen action.
reward_scores: The value for each note output by the reward_rnn.
Returns:
Float reward value.
"""
action_note = np.argmax(action)
normalization_constant = logsumexp(reward_scores)
return reward_scores[action_note] - normalization_constant
def get_reward_rnn_scores(self, observation, state):
"""Get note scores from the reward_rnn to use as a reward based on data.
Runs the reward_rnn on an observation and initial state. Useful for
maintaining the probabilities of the original LSTM model while training with
reinforcement learning.
Args:
observation: One-hot encoding of the observed note.
state: Vector representing the internal state of the target_q_network
LSTM.
Returns:
Action scores produced by reward_rnn.
"""
state = np.atleast_2d(state)
input_batch = np.reshape(observation, (self.reward_rnn.batch_size, 1,
self.num_actions))
lengths = np.full(self.reward_rnn.batch_size, 1, dtype=int)
rewards, = self.session.run(
self.reward_scores,
{self.reward_rnn.melody_sequence: input_batch,
self.reward_rnn.initial_state: state,
self.reward_rnn.lengths: lengths})
return rewards
def reward_music_theory(self, action):
"""Computes cumulative reward for all music theory functions.
Args:
action: A one-hot encoding of the chosen action.
Returns:
Float reward value.
"""
reward = self.reward_key(action)
tf.logging.debug('Key: %s', reward)
prev_reward = reward
reward += self.reward_tonic(action)
if reward != prev_reward:
tf.logging.debug('Tonic: %s', reward)
prev_reward = reward
reward += self.reward_penalize_repeating(action)
if reward != prev_reward:
tf.logging.debug('Penalize repeating: %s', reward)
prev_reward = reward
reward += self.reward_penalize_autocorrelation(action)
if reward != prev_reward:
tf.logging.debug('Penalize autocorr: %s', reward)
prev_reward = reward
reward += self.reward_motif(action)
if reward != prev_reward:
tf.logging.debug('Reward motif: %s', reward)
prev_reward = reward
reward += self.reward_repeated_motif(action)
if reward != prev_reward:
tf.logging.debug('Reward repeated motif: %s', reward)
prev_reward = reward
# New rewards based on Gauldin's book, "A Practical Approach to Eighteenth
# Century Counterpoint"
reward += self.reward_preferred_intervals(action)
if reward != prev_reward:
tf.logging.debug('Reward preferred_intervals: %s', reward)
prev_reward = reward
reward += self.reward_leap_up_back(action)
if reward != prev_reward:
tf.logging.debug('Reward leap up back: %s', reward)
prev_reward = reward
reward += self.reward_high_low_unique(action)
if reward != prev_reward:
tf.logging.debug('Reward high low unique: %s', reward)
return reward
def random_reward_shift_to_mean(self, reward):
"""Modifies reward by a small random values s to pull it towards the mean.
If reward is above the mean, s is subtracted; if reward is below the mean,
s is added. The random value is in the range 0-0.2. This function is helpful
to ensure that the model does not become too certain about playing a
particular note.
Args:
reward: A reward value that has already been computed by another reward
function.
Returns:
Original float reward value modified by scaler.
"""
s = np.random.randint(0, 2) * .1
if reward > .5:
reward -= s
else:
reward += s
return reward
def reward_scale(self, obs, action, scale=None):
"""Reward function that trains the model to play a scale.
Gives rewards for increasing notes, notes within the desired scale, and two
consecutive notes from the scale.
Args:
obs: A one-hot encoding of the observed note.
action: A one-hot encoding of the chosen action.
scale: The scale the model should learn. Defaults to C Major if not
provided.
Returns:
Float reward value.
"""
if scale is None:
scale = rl_tuner_ops.C_MAJOR_SCALE
obs = np.argmax(obs)
action = np.argmax(action)
reward = 0
if action == 1:
reward += .1
if action > obs and action < obs + 3:
reward += .05
if action in scale:
reward += .01
if obs in scale:
action_pos = scale.index(action)
obs_pos = scale.index(obs)
if obs_pos == len(scale) - 1 and action_pos == 0:
reward += .8
elif action_pos == obs_pos + 1:
reward += .8
return reward
def reward_key_distribute_prob(self, action, key=None):
"""Reward function that rewards the model for playing within a given key.
Any note within the key is given equal reward, which can cause the model to
learn random sounding compositions.
Args:
action: One-hot encoding of the chosen action.
key: The numeric values of notes belonging to this key. Defaults to C
Major if not provided.
Returns:
Float reward value.
"""
if key is None:
key = rl_tuner_ops.C_MAJOR_KEY
reward = 0
action_note = np.argmax(action)
if action_note in key:
num_notes_in_key = len(key)
extra_prob = 1.0 / num_notes_in_key
reward = extra_prob
return reward
def reward_key(self, action, penalty_amount=-1.0, key=None):
"""Applies a penalty for playing notes not in a specific key.
Args:
action: One-hot encoding of the chosen action.
penalty_amount: The amount the model will be penalized if it plays
a note outside the key.
key: The numeric values of notes belonging to this key. Defaults to
C-major if not provided.
Returns:
Float reward value.
"""
if key is None:
key = rl_tuner_ops.C_MAJOR_KEY
reward = 0
action_note = np.argmax(action)
if action_note not in key:
reward = penalty_amount
return reward
def reward_tonic(self, action, tonic_note=rl_tuner_ops.C_MAJOR_TONIC,
reward_amount=3.0):
"""Rewards for playing the tonic note at the right times.
Rewards for playing the tonic as the first note of the first bar, and the
first note of the final bar.
Args:
action: One-hot encoding of the chosen action.
tonic_note: The tonic/1st note of the desired key.
reward_amount: The amount the model will be awarded if it plays the
tonic note at the right time.
Returns:
Float reward value.
"""
action_note = np.argmax(action)
first_note_of_final_bar = self.num_notes_in_melody - 4
if self.beat == 0 or self.beat == first_note_of_final_bar:
if action_note == tonic_note:
return reward_amount
elif self.beat == first_note_of_final_bar + 1:
if action_note == NO_EVENT:
return reward_amount
elif self.beat > first_note_of_final_bar + 1:
if action_note == NO_EVENT or action_note == NOTE_OFF:
return reward_amount
return 0.0
def reward_non_repeating(self, action):
"""Rewards the model for not playing the same note over and over.
Penalizes the model for playing the same note repeatedly, although more
repetitions are allowed if it occasionally holds the note or rests in
between. Reward is uniform when there is no penalty.
Args:
action: One-hot encoding of the chosen action.
Returns:
Float reward value.
"""
penalty = self.reward_penalize_repeating(action)
if penalty >= 0:
return .1
# Propagate the negative penalty so a float is always returned.
return penalty
def detect_repeating_notes(self, action_note):
"""Detects whether the note played is repeating previous notes excessively.
Args:
action_note: An integer representing the note just played.
Returns:
True if the note just played is excessively repeated, False otherwise.
"""
num_repeated = 0
contains_held_notes = False
contains_breaks = False
# Note that the current action has not yet been added to the composition
for i in xrange(len(self.composition)-1, -1, -1):
if self.composition[i] == action_note:
num_repeated += 1
elif self.composition[i] == NOTE_OFF:
contains_breaks = True
elif self.composition[i] == NO_EVENT:
contains_held_notes = True
else:
break
if action_note == NOTE_OFF and num_repeated > 1:
return True
elif not contains_held_notes and not contains_breaks:
if num_repeated > 4:
return True
elif contains_held_notes or contains_breaks:
if num_repeated > 6:
return True
else:
if num_repeated > 8:
return True
return False
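# Illustrative sketch: with no held notes or rests in the lookback, a history
# such as [5, 5, 5, 5, 5] followed by action_note = 5 yields num_repeated = 5,
# which exceeds the threshold of 4 and is flagged as repeating; interleaving a
# NO_EVENT (held note) or NOTE_OFF (rest) raises the allowance to 6 repeats.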
def reward_penalize_repeating(self,
action,
penalty_amount=-100.0):
"""Sets the previous reward to 0 if the same is played repeatedly.
Allows more repeated notes if there are held notes or rests in between. If
no penalty is applied will return the previous reward.
Args:
action: One-hot encoding of the chosen action.
penalty_amount: The amount the model will be penalized if it plays
repeating notes.
Returns:
0.0 if no penalty applies, otherwise 'penalty_amount'.
"""
action_note = np.argmax(action)
is_repeating = self.detect_repeating_notes(action_note)
if is_repeating:
return penalty_amount
else:
return 0.0
def reward_penalize_autocorrelation(self,
action,
penalty_weight=3.0):
"""Reduces the previous reward if the composition is highly autocorrelated.
Penalizes the model for creating a composition that is highly correlated
with itself at lags of 1, 2, and 3 beats previous. This is meant to
encourage variety in compositions.
Args:
action: One-hot encoding of the chosen action.
penalty_weight: The default weight which will be multiplied by the sum
of the autocorrelation coefficients, and subtracted from prev_reward.
Returns:
Float reward value.
"""
composition = self.composition + [np.argmax(action)]
lags = [1, 2, 3]
sum_penalty = 0
for lag in lags:
coeff = rl_tuner_ops.autocorrelate(composition, lag=lag)
if not np.isnan(coeff):
if np.abs(coeff) > 0.15:
sum_penalty += np.abs(coeff) * penalty_weight
return -sum_penalty
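# Illustrative sketch (assumption noted): rl_tuner_ops.autocorrelate is taken
# here to return the normalized autocorrelation coefficient of the composition
# at the given lag. For a strictly alternating composition such as
# [5, 7, 5, 7, 5, 7], the lag-1 and lag-2 coefficients are large in magnitude
# (roughly -1 and +1), so each contributes about penalty_weight * |coeff| to
# the penalty returned above.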
def detect_last_motif(self, composition=None, bar_length=8):
"""Detects if a motif was just played and if so, returns it.
A motif should contain at least three distinct notes that are not NO_EVENT
or NOTE_OFF, and occur within the course of one bar.
Args:
composition: The composition in which the function will look for a
recent motif. Defaults to the model's composition.
bar_length: The number of notes in one bar.
Returns:
None if there is no motif, otherwise the motif in the same format as the
composition.
"""
if composition is None:
composition = self.composition
if len(composition) < bar_length:
return None, 0
last_bar = composition[-bar_length:]
actual_notes = [a for a in last_bar if a != NO_EVENT and a != NOTE_OFF]
num_unique_notes = len(set(actual_notes))
if num_unique_notes >= 3:
return last_bar, num_unique_notes
else:
return None, num_unique_notes
def reward_motif(self, action, reward_amount=3.0):
"""Rewards the model for playing any motif.
Motif must have at least three distinct notes in the course of one bar.
There is a bonus for playing more complex motifs; that is, ones that involve
a greater number of notes.
Args:
action: One-hot encoding of the chosen action.
reward_amount: The amount that will be returned if the last note belongs
to a motif.
Returns:
Float reward value.
"""
composition = self.composition + [np.argmax(action)]
motif, num_notes_in_motif = self.detect_last_motif(composition=composition)
if motif is not None:
motif_complexity_bonus = max((num_notes_in_motif - 3)*.3, 0)
return reward_amount + motif_complexity_bonus
else:
return 0.0
def detect_repeated_motif(self, action, bar_length=8):
"""Detects whether the last motif played repeats an earlier motif played.
Args:
action: One-hot encoding of the chosen action.
bar_length: The number of beats in one bar. This determines how many beats
the model has in which to play the motif.
Returns:
True if the note just played belongs to a motif that is repeated. False
otherwise.
"""
composition = self.composition + [np.argmax(action)]
if len(composition) < bar_length:
return False, None
motif, _ = self.detect_last_motif(
composition=composition, bar_length=bar_length)
if motif is None:
return False, None
prev_composition = self.composition[:-(bar_length-1)]
# Check if the motif is in the previous composition.
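# (The `else` attached to the inner loop below runs only if that loop finishes
# without hitting `break`, i.e. the motif matched prev_composition starting at
# index i.)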
for i in range(len(prev_composition) - len(motif) + 1):
for j in range(len(motif)):
if prev_composition[i + j] != motif[j]:
break
else:
return True, motif
return False, None
def reward_repeated_motif(self,
action,
bar_length=8,
reward_amount=4.0):
"""Adds a big bonus to previous reward if the model plays a repeated motif.
Checks if the model has just played a motif that repeats an earlier motif in
the composition.
There is also a bonus for repeating more complex motifs.
Args:
action: One-hot encoding of the chosen action.
bar_length: The number of notes in one bar.
reward_amount: The amount that will be added to the reward if the last
note belongs to a repeated motif.
Returns:
Float reward value.
"""
is_repeated, motif = self.detect_repeated_motif(action, bar_length)
if is_repeated:
actual_notes = [a for a in motif if a != NO_EVENT and a != NOTE_OFF]
num_notes_in_motif = len(set(actual_notes))
motif_complexity_bonus = max(num_notes_in_motif - 3, 0)
return reward_amount + motif_complexity_bonus
else:
return 0.0
def detect_sequential_interval(self, action, key=None):
"""Finds the melodic interval between the action and the last note played.
Uses constants to represent special intervals like rests.
Args:
action: One-hot encoding of the chosen action
key: The numeric values of notes belonging to this key. Defaults to
C-major if not provided.
Returns:
An integer value representing the interval, or a constant value for
special intervals.
"""
if not self.composition:
return 0, None, None
prev_note = self.composition[-1]
action_note = np.argmax(action)
c_major = False
if key is None:
key = rl_tuner_ops.C_MAJOR_KEY
c_notes = [2, 14, 26]
g_notes = [9, 21, 33]
e_notes = [6, 18, 30]
c_major = True
tonic_notes = [2, 14, 26]
fifth_notes = [9, 21, 33]
# get rid of non-notes in prev_note
prev_note_index = len(self.composition) - 1
while (prev_note == NO_EVENT or
prev_note == NOTE_OFF) and prev_note_index >= 0:
prev_note = self.composition[prev_note_index]
prev_note_index -= 1
if prev_note == NOTE_OFF or prev_note == NO_EVENT:
tf.logging.debug('Action_note: %s, prev_note: %s', action_note, prev_note)
return 0, action_note, prev_note
tf.logging.debug('Action_note: %s, prev_note: %s', action_note, prev_note)
# get rid of non-notes in action_note
if action_note == NO_EVENT:
if prev_note in tonic_notes or prev_note in fifth_notes:
return (rl_tuner_ops.HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH,
action_note, prev_note)
else:
return rl_tuner_ops.HOLD_INTERVAL, action_note, prev_note
elif action_note == NOTE_OFF:
if prev_note in tonic_notes or prev_note in fifth_notes:
return (rl_tuner_ops.REST_INTERVAL_AFTER_THIRD_OR_FIFTH,
action_note, prev_note)
else:
return rl_tuner_ops.REST_INTERVAL, action_note, prev_note
interval = abs(action_note - prev_note)
if c_major and interval == rl_tuner_ops.FIFTH and (
prev_note in c_notes or prev_note in g_notes):
return rl_tuner_ops.IN_KEY_FIFTH, action_note, prev_note
if c_major and interval == rl_tuner_ops.THIRD and (
prev_note in c_notes or prev_note in e_notes):
return rl_tuner_ops.IN_KEY_THIRD, action_note, prev_note
return interval, action_note, prev_note
def reward_preferred_intervals(self, action, scaler=5.0, key=None):
"""Dispenses reward based on the melodic interval just played.
Args:
action: One-hot encoding of the chosen action.
scaler: This value will be multiplied by all rewards in this function.
key: The numeric values of notes belonging to this key. Defaults to
C-major if not provided.
Returns:
Float reward value.
"""
interval, _, _ = self.detect_sequential_interval(action, key)
tf.logging.debug('Interval: %s', interval)
if interval == 0: # either no interval or involving uninteresting rests
tf.logging.debug('No interval or uninteresting.')
return 0.0
reward = 0.0
# rests can be good
if interval == rl_tuner_ops.REST_INTERVAL:
reward = 0.05
tf.logging.debug('Rest interval.')
if interval == rl_tuner_ops.HOLD_INTERVAL:
reward = 0.075
if interval == rl_tuner_ops.REST_INTERVAL_AFTER_THIRD_OR_FIFTH:
reward = 0.15
tf.logging.debug('Rest interval after 1st or 5th.')
if interval == rl_tuner_ops.HOLD_INTERVAL_AFTER_THIRD_OR_FIFTH:
reward = 0.3
# large leaps and awkward intervals bad
if interval == rl_tuner_ops.SEVENTH:
reward = -0.3
tf.logging.debug('7th')
if interval > rl_tuner_ops.OCTAVE:
reward = -1.0
tf.logging.debug('More than octave.')
# common major intervals are good
if interval == rl_tuner_ops.IN_KEY_FIFTH:
reward = 0.1
tf.logging.debug('In key 5th')
if interval == rl_tuner_ops.IN_KEY_THIRD:
reward = 0.15
tf.logging.debug('In key 3rd')
# smaller steps are generally preferred
if interval == rl_tuner_ops.THIRD:
reward = 0.09
tf.logging.debug('3rd')
if interval == rl_tuner_ops.SECOND:
reward = 0.08
tf.logging.debug('2nd')
if interval == rl_tuner_ops.FOURTH:
reward = 0.07
tf.logging.debug('4th')
# larger leaps not as good, especially if not in key
if interval == rl_tuner_ops.SIXTH:
reward = 0.05
tf.logging.debug('6th')
if interval == rl_tuner_ops.FIFTH:
reward = 0.02
tf.logging.debug('5th')
tf.logging.debug('Interval reward: %s', reward * scaler)
return reward * scaler
def detect_high_unique(self, composition):
"""Checks a composition to see if the highest note within it is repeated.
Args:
composition: A list of integers representing the notes in the piece.
Returns:
True if the highest note was unique, False otherwise.
"""
max_note = max(composition)
if list(composition).count(max_note) == 1:
return True
else:
return False
def detect_low_unique(self, composition):
"""Checks a composition to see if the lowest note within it is repeated.
Args:
composition: A list of integers representing the notes in the piece.
Returns:
True if the lowest note was unique, False otherwise.
"""
no_special_events = [x for x in composition
if x != NO_EVENT and x != NOTE_OFF]
if no_special_events:
min_note = min(no_special_events)
if list(composition).count(min_note) == 1:
return True
return False
def reward_high_low_unique(self, action, reward_amount=3.0):
"""Evaluates if highest and lowest notes in composition occurred once.
Args:
action: One-hot encoding of the chosen action.
reward_amount: Amount of reward that will be given for the highest note
being unique, and again for the lowest note being unique.
Returns:
Float reward value.
"""
if len(self.composition) + 1 != self.num_notes_in_melody:
return 0.0
composition = np.array(self.composition)
composition = np.append(composition, np.argmax(action))
reward = 0.0
if self.detect_high_unique(composition):
reward += reward_amount
if self.detect_low_unique(composition):
reward += reward_amount
return reward
def detect_leap_up_back(self, action, steps_between_leaps=6):
"""Detects when the composition takes a musical leap, and if it is resolved.
When the composition jumps up or down by an interval of a fifth or more,
it is a 'leap'. The model then remembers that it has a 'leap direction'. The
function detects if it then takes another leap in the same direction, if it
leaps back, or if it gradually resolves the leap.
Args:
action: One-hot encoding of the chosen action.
steps_between_leaps: Leaping back immediately does not constitute a
satisfactory resolution of a leap. Therefore the composition must wait
'steps_between_leaps' beats before leaping back.
Returns:
0 if there is no leap, 'LEAP_RESOLVED' if an existing leap has been
resolved, 'LEAP_DOUBLED' if 2 leaps in the same direction were made.
"""
if not self.composition:
return 0
outcome = 0
interval, action_note, prev_note = self.detect_sequential_interval(action)
if action_note == NOTE_OFF or action_note == NO_EVENT:
self.steps_since_last_leap += 1
tf.logging.debug('Rest, adding to steps since last leap. It is'
'now: %s', self.steps_since_last_leap)
return 0
# detect if leap
if interval >= rl_tuner_ops.FIFTH or interval == rl_tuner_ops.IN_KEY_FIFTH:
if action_note > prev_note:
leap_direction = rl_tuner_ops.ASCENDING
tf.logging.debug('Detected an ascending leap')
else:
leap_direction = rl_tuner_ops.DESCENDING
tf.logging.debug('Detected a descending leap')
# there was already an unresolved leap
if self.composition_direction != 0:
if self.composition_direction != leap_direction:
tf.logging.debug('Detected a resolved leap')
tf.logging.debug('Num steps since last leap: %s',
self.steps_since_last_leap)
if self.steps_since_last_leap > steps_between_leaps:
outcome = rl_tuner_ops.LEAP_RESOLVED
tf.logging.debug('Sufficient steps before leap resolved, '
'awarding bonus')
self.composition_direction = 0
self.leapt_from = None
else:
tf.logging.debug('Detected a double leap')
outcome = rl_tuner_ops.LEAP_DOUBLED
# the composition had no previous leaps
else:
tf.logging.debug('There was no previous leap direction')
self.composition_direction = leap_direction
self.leapt_from = prev_note
self.steps_since_last_leap = 0
# there is no leap
else:
self.steps_since_last_leap += 1
tf.logging.debug('No leap, adding to steps since last leap. '
'It is now: %s', self.steps_since_last_leap)
# If there was a leap before, check if composition has gradually returned
# This could be changed by requiring you to only go a 5th back in the
# opposite direction of the leap.
if (self.composition_direction == rl_tuner_ops.ASCENDING and
action_note <= self.leapt_from) or (
self.composition_direction == rl_tuner_ops.DESCENDING and
action_note >= self.leapt_from):
tf.logging.debug('detected a gradually resolved leap')
outcome = rl_tuner_ops.LEAP_RESOLVED
self.composition_direction = 0
self.leapt_from = None
return outcome
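# Illustrative summary (sketch): after an ascending leap of a fifth or more,
# the composition is considered resolved if it later leaps back down after
# more than `steps_between_leaps` steps, or if it drifts back to or below the
# note it leapt from (both cases return LEAP_RESOLVED); leaping upward again
# before resolving returns LEAP_DOUBLED, and anything else returns 0.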
def reward_leap_up_back(self, action, resolving_leap_bonus=5.0,
leaping_twice_punishment=-5.0):
"""Applies punishment and reward based on the principle leap up leap back.
Large interval jumps (more than a fifth) should be followed by moving back
in the opposite direction.
Args:
action: One-hot encoding of the chosen action.
resolving_leap_bonus: Amount of reward dispensed for resolving a previous
leap.
leaping_twice_punishment: Amount of reward received for leaping twice in
the same direction.
Returns:
Float reward value.
"""
leap_outcome = self.detect_leap_up_back(action)
if leap_outcome == rl_tuner_ops.LEAP_RESOLVED:
tf.logging.debug('Leap resolved, awarding %s', resolving_leap_bonus)
return resolving_leap_bonus
elif leap_outcome == rl_tuner_ops.LEAP_DOUBLED:
tf.logging.debug('Leap doubled, awarding %s', leaping_twice_punishment)
return leaping_twice_punishment
else:
return 0.0
def reward_interval_diversity(self):
# TODO(natashajaques): music theory book also suggests having a mix of steps
# that are both incremental and larger. Want to write a function that
# rewards this. Could have some kind of interval_stats stored by
# reward_preferred_intervals function.
pass
def generate_music_sequence(self, title='rltuner_sample',
visualize_probs=False, prob_image_name=None,
length=None, most_probable=False):
"""Generates a music sequence with the current model, and saves it to MIDI.
The resulting MIDI file is saved to the model's output_dir directory. The
sequence is generated by sampling from the output probabilities at each
timestep, and feeding the resulting note back in as input to the model.
Args:
title: The name that will be used to save the output MIDI file.
visualize_probs: If True, the function will plot the softmax
probabilities of the model for each note that occur throughout the
sequence. Useful for debugging.
prob_image_name: The name of a file in which to save the softmax
probability image. If None, the image will simply be displayed.
length: The length of the sequence to be generated. Defaults to the
num_notes_in_melody parameter of the model.
most_probable: If True, instead of sampling each note in the sequence,
the model will always choose the argmax, most probable note.
"""
if length is None:
length = self.num_notes_in_melody
self.reset_composition()
next_obs = self.prime_internal_models()
tf.logging.info('Priming with note %s', np.argmax(next_obs))
lengths = np.full(self.q_network.batch_size, 1, dtype=int)
if visualize_probs:
prob_image = np.zeros((self.input_size, length))
generated_seq = [0] * length
for i in range(length):
input_batch = np.reshape(next_obs, (self.q_network.batch_size, 1,
self.num_actions))
if self.algorithm == 'g':
(softmax, self.q_network.state_value,
self.reward_rnn.state_value) = self.session.run(
[self.action_softmax, self.q_network.state_tensor,
self.reward_rnn.state_tensor],
{self.q_network.melody_sequence: input_batch,
self.q_network.initial_state: self.q_network.state_value,
self.q_network.lengths: lengths,
self.reward_rnn.melody_sequence: input_batch,
self.reward_rnn.initial_state: self.reward_rnn.state_value,
self.reward_rnn.lengths: lengths})
else:
softmax, self.q_network.state_value = self.session.run(
[self.action_softmax, self.q_network.state_tensor],
{self.q_network.melody_sequence: input_batch,
self.q_network.initial_state: self.q_network.state_value,
self.q_network.lengths: lengths})
softmax = np.reshape(softmax, (self.num_actions))
if visualize_probs:
prob_image[:, i] = softmax # np.log(1.0 + softmax)
if most_probable:
sample = np.argmax(softmax)
else:
sample = rl_tuner_ops.sample_softmax(softmax)
generated_seq[i] = sample
next_obs = np.array(rl_tuner_ops.make_onehot([sample],
self.num_actions)).flatten()
tf.logging.info('Generated sequence: %s', generated_seq)
# TODO(natashamjaques): Remove print statement once tf.logging outputs
# to Jupyter notebooks (once the following issue is resolved:
# https://github.com/tensorflow/tensorflow/issues/3047)
print 'Generated sequence:', generated_seq
melody = mlib.Melody(rl_tuner_ops.decoder(generated_seq,
self.q_network.transpose_amount))
sequence = melody.to_sequence(qpm=rl_tuner_ops.DEFAULT_QPM)
filename = rl_tuner_ops.get_next_file_name(self.output_dir, title, 'mid')
midi_io.sequence_proto_to_midi_file(sequence, filename)
tf.logging.info('Wrote a melody to %s', self.output_dir)
if visualize_probs:
tf.logging.info('Visualizing note selection probabilities:')
plt.figure()
plt.imshow(prob_image, interpolation='none', cmap='Reds')
plt.ylabel('Note probability')
plt.xlabel('Time (beat)')
plt.gca().invert_yaxis()
if prob_image_name is not None:
plt.savefig(self.output_dir + '/' + prob_image_name)
else:
plt.show()
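# Illustrative usage (sketch; `rl_tuner` names an already-trained model object
# and is hypothetical):
#
#   rl_tuner.generate_music_sequence(title='my_sample',
#                                    visualize_probs=True,
#                                    prob_image_name='probs.png')
#
# This samples one melody of num_notes_in_melody steps, writes a MIDI file
# into rl_tuner.output_dir, and saves the per-step softmax image to
# output_dir/probs.png.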
def evaluate_music_theory_metrics(self, num_compositions=10000, key=None,
tonic_note=rl_tuner_ops.C_MAJOR_TONIC):
"""Computes statistics about music theory rule adherence.
Args:
num_compositions: How many compositions should be randomly generated
for computing the statistics.
key: The numeric values of notes belonging to this key. Defaults to C
Major if not provided.
tonic_note: The tonic/1st note of the desired key.
Returns:
A dictionary containing the statistics.
"""
stat_dict = rl_tuner_eval_metrics.compute_composition_stats(
self,
num_compositions=num_compositions,
composition_length=self.num_notes_in_melody,
key=key,
tonic_note=tonic_note)
return stat_dict
def save_model(self, name, directory=None):
"""Saves a checkpoint of the model and a .npz file with stored rewards.
Args:
name: String name to use for the checkpoint and rewards files.
directory: Path to directory where the data will be saved. Defaults to
self.output_dir if None is provided.
"""
if directory is None:
directory = self.output_dir
save_loc = os.path.join(directory, name)
self.saver.save(self.session, save_loc,
global_step=len(self.rewards_batched)*self.output_every_nth)
self.save_stored_rewards(name)
def save_stored_rewards(self, file_name):
"""Saves the models stored rewards over time in a .npz file.
Args:
file_name: Name of the file that will be saved.
"""
training_epochs = len(self.rewards_batched) * self.output_every_nth
filename = os.path.join(self.output_dir,
file_name + '-' + str(training_epochs))
np.savez(filename,
train_rewards=self.rewards_batched,
train_music_theory_rewards=self.music_theory_rewards_batched,
train_note_rnn_rewards=self.note_rnn_rewards_batched,
eval_rewards=self.eval_avg_reward,
eval_music_theory_rewards=self.eval_avg_music_theory_reward,
eval_note_rnn_rewards=self.eval_avg_note_rnn_reward,
target_val_list=self.target_val_list)
def save_model_and_figs(self, name, directory=None):
"""Saves the model checkpoint, .npz file, and reward plots.
Args:
name: Name of the model that will be used on the images,
checkpoint, and .npz files.
directory: Path to directory where files will be saved.
If None defaults to self.output_dir.
"""
self.save_model(name, directory=directory)
self.plot_rewards(image_name='TrainRewards-' + name + '.eps',
directory=directory)
self.plot_evaluation(image_name='EvaluationRewards-' + name + '.eps',
directory=directory)
self.plot_target_vals(image_name='TargetVals-' + name + '.eps',
directory=directory)
def plot_rewards(self, image_name=None, directory=None):
"""Plots the cumulative rewards received as the model was trained.
If image_name is None, should be used in jupyter notebook. If
called outside of jupyter, execution of the program will halt and
a pop-up with the graph will appear. Execution will not continue
until the pop-up is closed.
Args:
image_name: Name to use when saving the plot to a file. If not
provided, image will be shown immediately.
directory: Path to directory where figure should be saved. If
None, defaults to self.output_dir.
"""
if directory is None:
directory = self.output_dir
reward_batch = self.output_every_nth
x = [reward_batch * i for i in np.arange(len(self.rewards_batched))]
plt.figure()
plt.plot(x, self.rewards_batched)
plt.plot(x, self.music_theory_rewards_batched)
plt.plot(x, self.note_rnn_rewards_batched)
plt.xlabel('Training epoch')
plt.ylabel('Cumulative reward for last ' + str(reward_batch) + ' steps')
plt.legend(['Total', 'Music theory', 'Note RNN'], loc='best')
if image_name is not None:
plt.savefig(directory + '/' + image_name)
else:
plt.show()
def plot_evaluation(self, image_name=None, directory=None, start_at_epoch=0):
"""Plots the rewards received as the model was evaluated during training.
If image_name is None, should be used in jupyter notebook. If
called outside of jupyter, execution of the program will halt and
a pop-up with the graph will appear. Execution will not continue
until the pop-up is closed.
Args:
image_name: Name to use when saving the plot to a file. If not
provided, image will be shown immediately.
directory: Path to directory where figure should be saved. If
None, defaults to self.output_dir.
start_at_epoch: Training epoch where the plot should begin.
"""
if directory is None:
directory = self.output_dir
reward_batch = self.output_every_nth
x = [reward_batch * i for i in np.arange(len(self.eval_avg_reward))]
start_index = start_at_epoch / self.output_every_nth
plt.figure()
plt.plot(x[start_index:], self.eval_avg_reward[start_index:])
plt.plot(x[start_index:], self.eval_avg_music_theory_reward[start_index:])
plt.plot(x[start_index:], self.eval_avg_note_rnn_reward[start_index:])
plt.xlabel('Training epoch')
plt.ylabel('Average reward')
plt.legend(['Total', 'Music theory', 'Note RNN'], loc='best')
if image_name is not None:
plt.savefig(directory + '/' + image_name)
else:
plt.show()
def plot_target_vals(self, image_name=None, directory=None):
"""Plots the target values used to train the model over time.
If image_name is None, should be used in jupyter notebook. If
called outside of jupyter, execution of the program will halt and
a pop-up with the graph will appear. Execution will not continue
until the pop-up is closed.
Args:
image_name: Name to use when saving the plot to a file. If not
provided, image will be shown immediately.
directory: Path to directory where figure should be saved. If
None, defaults to self.output_dir.
"""
if directory is None:
directory = self.output_dir
reward_batch = self.output_every_nth
x = [reward_batch * i for i in np.arange(len(self.target_val_list))]
plt.figure()
plt.plot(x, self.target_val_list)
plt.xlabel('Training epoch')
plt.ylabel('Target value')
if image_name is not None:
plt.savefig(directory + '/' + image_name)
else:
plt.show()
def prime_internal_models(self):
"""Primes both internal models based on self.priming_mode.
Returns:
A one-hot encoding of the note output by the q_network to be used as
the initial observation.
"""
self.prime_internal_model(self.target_q_network)
self.prime_internal_model(self.reward_rnn)
next_obs = self.prime_internal_model(self.q_network)
return next_obs
def restore_from_directory(self, directory=None, checkpoint_name=None,
reward_file_name=None):
"""Restores this model from a saved checkpoint.
Args:
directory: Path to directory where checkpoint is located. If
None, defaults to self.output_dir.
checkpoint_name: The name of the checkpoint within the
directory.
reward_file_name: The name of the .npz file where the stored
rewards are saved. If None, will not attempt to load stored
rewards.
"""
if directory is None:
directory = self.output_dir
if checkpoint_name is not None:
checkpoint_file = os.path.join(directory, checkpoint_name)
else:
tf.logging.info('Directory %s.', directory)
checkpoint_file = tf.train.latest_checkpoint(directory)
if checkpoint_file is None:
tf.logging.fatal('Error! Cannot locate checkpoint in the directory')
return
# TODO(natashamjaques): Remove print statement once tf.logging outputs
# to Jupyter notebooks (once the following issue is resolved:
# https://github.com/tensorflow/tensorflow/issues/3047)
print 'Attempting to restore from checkpoint', checkpoint_file
tf.logging.info('Attempting to restore from checkpoint %s', checkpoint_file)
self.saver.restore(self.session, checkpoint_file)
if reward_file_name is not None:
npz_file_name = os.path.join(directory, reward_file_name)
# TODO(natashamjaques): Remove print statement once tf.logging outputs
# to Jupyter notebooks (once the following issue is resolved:
# https://github.com/tensorflow/tensorflow/issues/3047)
print 'Attempting to load saved reward values from file', npz_file_name
tf.logging.info('Attempting to load saved reward values from file %s',
npz_file_name)
npz_file = np.load(npz_file_name)
self.rewards_batched = npz_file['train_rewards']
self.music_theory_rewards_batched = npz_file['train_music_theory_rewards']
self.note_rnn_rewards_batched = npz_file['train_note_rnn_rewards']
self.eval_avg_reward = npz_file['eval_rewards']
self.eval_avg_music_theory_reward = npz_file['eval_music_theory_rewards']
self.eval_avg_note_rnn_reward = npz_file['eval_note_rnn_rewards']
self.target_val_list = npz_file['target_val_list']
|
auferack08/edx-platform | refs/heads/master | lms/djangoapps/instructor/features/bulk_email.py | 23 | """
Define steps for bulk email acceptance test.
"""
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from lettuce.django import mail
from nose.tools import assert_in, assert_true, assert_equal # pylint: disable=E0611
from django.core.management import call_command
from django.conf import settings
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given there is a course with a staff, instructor and student')
def make_populated_course(step): # pylint: disable=unused-argument
## This is different than the function defined in common.py because it enrolls
## a staff, instructor, and student member regardless of what `role` is, then
## logs `role` in. This is to ensure we have 3 class participants to email.
# Clear existing courses to avoid conflicts
world.clear_courses()
# Create a new course
course = world.CourseFactory.create(
org='edx',
number='888',
display_name='Bulk Email Test Course'
)
world.bulk_email_course_key = course.id
try:
# See if we've defined the instructor & staff user yet
world.bulk_email_instructor
except AttributeError:
# Make & register an instructor for the course
world.bulk_email_instructor = InstructorFactory(course_key=world.bulk_email_course_key)
world.enroll_user(world.bulk_email_instructor, world.bulk_email_course_key)
# Make & register a staff member
world.bulk_email_staff = StaffFactory(course_key=course.id)
world.enroll_user(world.bulk_email_staff, world.bulk_email_course_key)
# Make & register a student
world.register_by_course_key(
course.id,
username='student',
password='test',
is_staff=False
)
# Store the expected recipients
# given each "send to" option
staff_emails = [world.bulk_email_staff.email, world.bulk_email_instructor.email]
world.expected_addresses = {
'course staff': staff_emails,
'students, staff, and instructors': staff_emails + ['[email protected]']
}
# Dictionary mapping a description of the email recipient
# to the corresponding <option> value in the UI.
SEND_TO_OPTIONS = {
'myself': 'myself',
'course staff': 'staff',
'students, staff, and instructors': 'all'
}
@step(u'I am logged in to the course as "([^"]*)"')
def log_into_the_course(step, role): # pylint: disable=unused-argument
# Store the role
assert_in(role, ['instructor', 'staff'])
# Log in as an instructor or staff member for the course
my_email = world.bulk_email_instructor.email
if role == 'instructor':
world.log_in(
username=world.bulk_email_instructor.username,
password='test',
email=my_email,
name=world.bulk_email_instructor.profile.name
)
else:
my_email = world.bulk_email_staff.email
world.log_in(
username=world.bulk_email_staff.username,
password='test',
email=my_email,
name=world.bulk_email_staff.profile.name
)
# Store the "myself" send to option
world.expected_addresses['myself'] = [my_email]
@step(u'I send email to "([^"]*)"')
def when_i_send_an_email(step, recipient): # pylint: disable=unused-argument
# Check that the recipient is valid
assert_in(
recipient, SEND_TO_OPTIONS,
msg="Invalid recipient: {}".format(recipient)
)
# Clear the queue of existing emails
while not mail.queue.empty(): # pylint: disable=E1101
mail.queue.get() # pylint: disable=E1101
# Because we flush the database before each run,
# we need to ensure that the email template fixture
# is re-loaded into the database
call_command('loaddata', 'course_email_template.json')
# Go to the email section of the instructor dash
world.visit('/courses/edx/888/Bulk_Email_Test_Course')
world.css_click('a[href="/courses/edx/888/Bulk_Email_Test_Course/instructor"]')
world.css_click('a[data-section="send_email"]')
# Select the recipient
world.select_option('send_to', SEND_TO_OPTIONS[recipient])
# Enter subject and message
world.css_fill('input#id_subject', 'Hello')
with world.browser.get_iframe('mce_0_ifr') as iframe:
editor = iframe.find_by_id('tinymce')[0]
editor.fill('test message')
# Click send
world.css_click('input[name="send"]', dismiss_alert=True)
# Expect to see a message that the email was sent
expected_msg = "Your email was successfully queued for sending."
world.wait_for_visible('#request-response')
assert_in(
expected_msg, world.css_text('#request-response'),
msg="Could not find email success message."
)
UNSUBSCRIBE_MSG = 'To stop receiving email like this'
@step(u'Email is sent to "([^"]*)"')
def then_the_email_is_sent(step, recipient): # pylint: disable=unused-argument
# Check that the recipient is valid
assert_in(
recipient, SEND_TO_OPTIONS,
msg="Invalid recipient: {}".format(recipient)
)
# Retrieve messages. Because we are using celery in "always eager"
# mode, we expect all messages to be sent by this point.
messages = []
while not mail.queue.empty(): # pylint: disable=E1101
messages.append(mail.queue.get()) # pylint: disable=E1101
# Check that we got the right number of messages
assert_equal(
len(messages), len(world.expected_addresses[recipient]),
msg="Received {0} instead of {1} messages for {2}".format(
len(messages), len(world.expected_addresses[recipient]), recipient
)
)
# Check that the message properties were correct
recipients = []
for msg in messages:
assert_in('Hello', msg.subject)
assert_in(settings.BULK_EMAIL_DEFAULT_FROM_EMAIL, msg.from_email)
# Message body should have the message we sent
# and an unsubscribe message
assert_in('test message', msg.body)
assert_in(UNSUBSCRIBE_MSG, msg.body)
# Should have alternative HTML form
assert_equal(len(msg.alternatives), 1)
content, mime_type = msg.alternatives[0]
assert_equal(mime_type, 'text/html')
assert_in('test message', content)
assert_in(UNSUBSCRIBE_MSG, content)
# Store the recipient address so we can verify later
recipients.extend(msg.recipients())
# Check that the messages were sent to the right people
# Because "myself" can vary based on who sent the message,
# we use the world.expected_addresses dict we configured
# in an earlier step.
for addr in world.expected_addresses[recipient]:
assert_in(addr, recipients)
|
tequa/ammisoft | refs/heads/master | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/numpy/doc/constants.py | 55 | """
=========
Constants
=========
NumPy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
from __future__ import division, absolute_import, print_function
import textwrap, re
# Maintain same format as in numpy.add_newdocs
constants = []
def add_newdoc(module, name, doc):
constants.append((name, doc))
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True], dtype=bool)
>>> np.isnan([np.NZERO])
array([False], dtype=bool)
>>> np.isinf([np.NZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True], dtype=bool)
>>> np.isnan([np.PZERO])
array([False], dtype=bool)
>>> np.isinf([np.PZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
.. [1] http://en.wikipedia.org/wiki/Napier_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
See Also
--------
`numpy.doc.indexing`
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
if __doc__:
constants_str = []
constants.sort()
for name, doc in constants:
s = textwrap.dedent(doc).replace("\n", "\n ")
# Replace sections by rubrics
lines = s.split("\n")
new_lines = []
for line in lines:
m = re.match(r'^(\s+)[-=]+\s*$', line)
if m and new_lines:
prev = textwrap.dedent(new_lines.pop())
new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
new_lines.append('')
else:
new_lines.append(line)
s = "\n".join(new_lines)
# Done.
constants_str.append(""".. const:: %s\n %s""" % (name, s))
constants_str = "\n".join(constants_str)
__doc__ = __doc__ % dict(constant_list=constants_str)
del constants_str, name, doc
del line, lines, new_lines, m, s, prev
del constants, add_newdoc
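# Example of the transformation performed above (sketch): inside a constant's
# docstring, a section header such as
#
#   See Also
#   --------
#
# is replaced by '.. rubric:: See Also' (plus a blank line), and each constant
# is then embedded in the module docstring as a '.. const:: <name>' entry.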
|
manassolanki/erpnext | refs/heads/develop | erpnext/patches/v7_0/make_guardian.py | 23 | from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Student"):
student_table_cols = frappe.db.get_table_columns("Student")
if "father_name" in student_table_cols:
# The 'Schools' module was renamed to 'Education'
# frappe.reload_doc("schools", "doctype", "student")
# frappe.reload_doc("schools", "doctype", "guardian")
# frappe.reload_doc("schools", "doctype", "guardian_interest")
frappe.reload_doc("education", "doctype", "student")
frappe.reload_doc("education", "doctype", "guardian")
frappe.reload_doc("education", "doctype", "guardian_interest")
frappe.reload_doc("hr", "doctype", "interest")
fields = ["name", "father_name", "mother_name"]
if "father_email_id" in student_table_cols:
fields += ["father_email_id", "mother_email_id"]
students = frappe.get_all("Student", fields)
for stud in students:
if stud.father_name:
make_guardian(stud.father_name, stud.name, stud.father_email_id)
if stud.mother_name:
make_guardian(stud.mother_name, stud.name, stud.mother_email_id)
def make_guardian(name, student, email=None):
frappe.get_doc({
'doctype': 'Guardian',
'guardian_name': name,
'email': email,
'student': student
}).insert()
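# Illustrative call (hypothetical values): make_guardian('Jane Doe', 'STU-0001',
# '[email protected]') inserts a Guardian record with guardian_name 'Jane Doe'
# linked to student 'STU-0001'; the email argument may be omitted.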
|
BT-ojossen/odoo | refs/heads/8.0 | addons/l10n_fr_hr_payroll/report/__init__.py | 424 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fiche_paye
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
xiaoshuai09/Recurrent-Point-Process | refs/heads/master | RNNPP.py | 1 | import numpy as np
import matplotlib,sys
matplotlib.use('agg')
import tensorflow as tf
from BatchIterator import PaddedDataIterator
from generation import *
##############################################################################
# parameters
BATCH_SIZE = 256 # Batch size
MAX_STEPS = 300 # maximum length of your sequence
ITERS = 30000 # how many generator iterations to train for
REG = 0.1 # tradeoff between time and mark loss
LR = 1e-4 # learning rate
TYPE = sys.argv[1] # model type: joint event timeseries
NUM_steps_timeseries = 7 # timeseries steps before one event
Timeseries_feature = 4 # time series feature size
SEED = 12345 # set graph-level seed to make the random sequences generated by all ops be repeatable across sessions
tf.set_random_seed(SEED)
np.random.seed(SEED)
##############################################################################
# prepare data
#this is just toy data to test the code.
DIM_SIZE = 7 # equal num of classes
mi = MarkedIntensityHomogenuosPoisson(DIM_SIZE)
for u in range(DIM_SIZE):
mi.initialize(1.0, u)
simulated_sequences = generate_samples_marked(mi, 15.0, 1000)
event_iterator = PaddedDataIterator(simulated_sequences,0,MARK=True,DIFF=True)
# time_series_data = np.ones((BATCH_SIZE,real_batch[0].shape[1],NUM_steps_timeseries,4))
###############################################################################
# define model
def RNNPP(rnn_inputs_event, #dims batch_size x num_steps x input_size(mark&time), pad with 0 if variable length
rnn_inputs_timeseries, # batch_size x num_steps x num_steps_timeseries x input_size, pad with 0 if variable length
seqlen, # sequence length for each sequence, tf.int32 vector
lower_triangular_ones, # lower triangular matrix
num_classes=7, # number of dimensions for event sequence
loss='mse', # loss type for time: mse and intensity, intensity loss comes from Du, etc. KDD16
start = 3, # predict forward event starting at start-th event for each sequence
reg = REG, # loss trade-off between mark and time
state_size_event = 16, # RNN state size for event sequence
state_size_timeseries = 32, #RNN state size for time series
batch_size = BATCH_SIZE,
scope_reuse=False):
epilson = tf.constant(1e-3,tf.float32)
with tf.variable_scope("RNNPP") as scope:
if scope_reuse:
scope.reuse_variables()
num_steps = tf.shape(rnn_inputs_event)[1]
event_size = tf.shape(rnn_inputs_event)[2]
y = tf.concat([rnn_inputs_event[:,1:,:],rnn_inputs_event[:,:1,:]],axis=1)
y = tf.reshape(y, [-1,event_size])
if TYPE=='joint' or TYPE=='event':
with tf.variable_scope("event") as scope:
# rnn for event sequence
rnn_input_onehot = tf.one_hot(tf.cast(rnn_inputs_event[:,:,0],tf.int32),num_classes) #when num_class is large, use tf embedding
rnn_inputs_event = tf.concat([rnn_input_onehot,rnn_inputs_event[:,:,1:]],axis=2)
cell = tf.contrib.rnn.BasicRNNCell(state_size_event) #cell = tf.contrib.rnn.LSTMCell(state_size,state_is_tuple=True) is perferred
init_state = cell.zero_state(batch_size, tf.float32)
rnn_outputs_event, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs_event, sequence_length=seqlen, initial_state=init_state)
if TYPE=='joint' or TYPE=='timeseries':
with tf.variable_scope("timeseries") as scope:
# rnn for time series
cell = tf.contrib.rnn.BasicRNNCell(state_size_timeseries) #cell = tf.contrib.rnn.LSTMCell(state_size,state_is_tuple=True) is perferred
init_state = cell.zero_state(batch_size*num_steps, tf.float32)
rnn_inputs_timeseries = tf.reshape(rnn_inputs_timeseries,[-1,NUM_steps_timeseries,Timeseries_feature])
rnn_outputs_timeseries, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs_timeseries, initial_state=init_state)
rnn_outputs_timeseries = tf.reshape(rnn_outputs_timeseries[:,-1,:],[batch_size,num_steps,state_size_timeseries])
#reshape rnn_outputs
if TYPE=='joint':
rnn_outputs = tf.concat([rnn_outputs_event,rnn_outputs_timeseries],axis=2)
elif TYPE=='event':
rnn_outputs = rnn_outputs_event
elif TYPE=='timeseries':
rnn_outputs = rnn_outputs_timeseries
rnn_outputs_shape = tf.shape(rnn_outputs)
rnn_outputs = tf.reshape(rnn_outputs, [-1, rnn_outputs_shape[-1]])
# linear layer
with tf.variable_scope('time'):
if TYPE=='joint':
W_t = tf.get_variable('Wt', [state_size_event+state_size_timeseries, 1])
elif TYPE=='event':
W_t = tf.get_variable('Wt', [state_size_event, 1])
elif TYPE=='timeseries':
W_t = tf.get_variable('Wt', [state_size_timeseries, 1])
w_t = tf.get_variable('wt', [1], initializer=tf.constant_initializer(1.0))
b_t = tf.get_variable('bt', [1], initializer=tf.constant_initializer(0.0))
if loss=='intensity':
wt = tf.cond(tf.less(tf.reshape(tf.abs(w_t),[]),epilson), lambda: tf.sign(w_t)*epilson, lambda: w_t) # clamp |w_t| away from zero before it is used as a divisor below
part1 = tf.matmul(rnn_outputs, W_t) + b_t
part2 = wt*y[:,1]
time_loglike = part1 + part2 + (tf.exp(part1)-tf.exp(part1+part2))/wt
time_loss = - time_loglike
elif loss=='mse':
time_hat = tf.matmul(rnn_outputs, W_t) + b_t
time_loss = tf.abs(tf.reshape(time_hat,[-1]) - y[:,1])
# Softmax layer
with tf.variable_scope('softmax'):
if TYPE=='joint':
W_l = tf.get_variable('Wl', [state_size_event+state_size_timeseries, num_classes])
elif TYPE=='event':
W_l = tf.get_variable('Wl', [state_size_event, num_classes])
elif TYPE=='timeseries':
W_l = tf.get_variable('Wl', [state_size_timeseries, num_classes])
b_l = tf.get_variable('bl', [num_classes], initializer=tf.constant_initializer(0.0))
mark_logits = tf.matmul(rnn_outputs, W_l) + b_l
mark_true = tf.one_hot(tf.cast(y[:,0],tf.int32),num_classes)
mark_loss = tf.nn.softmax_cross_entropy_with_logits(logits=mark_logits, labels=mark_true)
total_loss = mark_loss + reg*time_loss
#length of y minus 2 to drop last prediction
seqlen_mask = tf.slice(tf.gather(lower_triangular_ones, seqlen - 2), [0, start], [batch_size, num_steps - start])
zeros_pad = tf.zeros([batch_size,start])
seqlen_mask = tf.concat([zeros_pad,seqlen_mask],axis=1)
mark_loss = tf.reshape(mark_loss,[batch_size,num_steps])
mark_loss *= seqlen_mask
# Average over actual sequence lengths.
mark_loss = tf.reduce_sum(mark_loss, axis=1)
mark_loss = tf.reduce_mean(mark_loss)
total_loss = tf.reshape(total_loss,[batch_size,num_steps])
total_loss *= seqlen_mask #why 256*256 vs 256*140
# Average over actual sequence lengths.
total_loss = tf.reduce_sum(total_loss, axis=1)
total_loss = tf.reduce_mean(total_loss)
time_loss = total_loss - mark_loss
return total_loss,mark_loss,time_loss
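# Reference sketch of the two time losses computed above. With h the RNN
# output for a step, d = y[:, 1] the observed gap to the next event, and w
# the scalar `wt`, the intensity-based log-likelihood (loss='intensity') is
#
#   log f(d) = W_t'h + b_t + w*d + (exp(W_t'h + b_t) - exp(W_t'h + b_t + w*d)) / w
#
# i.e. part1 + part2 + (exp(part1) - exp(part1 + part2)) / wt, and time_loss
# is its negative. Under loss='mse' the code actually uses the absolute error
# |time_hat - d| rather than a squared error, despite the option name.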
event_sequence = tf.placeholder(tf.float32, shape=[BATCH_SIZE, None, 2])
time_series = tf.placeholder(tf.float32, shape=[BATCH_SIZE, None, NUM_steps_timeseries, Timeseries_feature])
seqlen = tf.placeholder(tf.int32, shape=[BATCH_SIZE])
lower_triangular_ones = tf.constant(np.tril(np.ones([MAX_STEPS,MAX_STEPS])),dtype=tf.float32)
total_loss,mark_loss,time_loss = RNNPP(event_sequence,time_series,seqlen,lower_triangular_ones)
train_variables = tf.trainable_variables()
joint_variables = [v for v in train_variables if v.name.startswith("RNNPP")]
print(map(lambda x: x.op.name, joint_variables))
train_op = tf.train.RMSPropOptimizer(learning_rate=LR).minimize(total_loss, var_list=joint_variables)
##################################################################################
# run
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0, allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options))
sess.run(tf.global_variables_initializer())
# train
for it in range(ITERS):
if TYPE=='joint':
real_batch = event_iterator.next_batch(BATCH_SIZE)
time_series_data = np.ones((BATCH_SIZE,real_batch[0].shape[1],NUM_steps_timeseries,4))
total_loss_curr,mark_loss_curr,time_loss_curr, _= sess.run([total_loss,mark_loss,time_loss,train_op],
feed_dict={event_sequence:real_batch[0], seqlen:real_batch[1], time_series:time_series_data})
print ('Iter: {}; Total loss: {:.4}; Mark loss: {:.4}; Time loss: {:.4}'.format(it, total_loss_curr,mark_loss_curr,time_loss_curr))
if TYPE=='event':
real_batch = event_iterator.next_batch(BATCH_SIZE)
total_loss_curr,mark_loss_curr,time_loss_curr, _= sess.run([total_loss,mark_loss,time_loss,train_op],
feed_dict={event_sequence:real_batch[0], seqlen:real_batch[1]})
print ('Iter: {}; Total loss: {:.4}; Mark loss: {:.4}; Time loss: {:.4}'.format(it, total_loss_curr,mark_loss_curr,time_loss_curr))
if TYPE=='timeseries':
real_batch = event_iterator.next_batch(BATCH_SIZE)
time_series_data = np.ones((BATCH_SIZE,real_batch[0].shape[1],NUM_steps_timeseries,4))
total_loss_curr,mark_loss_curr,time_loss_curr, _= sess.run([total_loss,mark_loss,time_loss,train_op],
feed_dict={event_sequence:real_batch[0], seqlen:real_batch[1], time_series:time_series_data})
print ('Iter: {}; Total loss: {:.4}; Mark loss: {:.4}; Time loss: {:.4}'.format(it, total_loss_curr,mark_loss_curr,time_loss_curr))
|
nave91/teak-nbtree | refs/heads/master | graph_tools/csver.py | 1 | #!/usr/bin/env python
from sys import *
def line(file):
l = file.readline()
if l != '':
l = l.replace('\n','')
l = l.split(' ')
l = [i for i in l if i != '' ]
return l
file = open(argv[1],'r')
s=''
j=0
while True:
lst = line(file)
if not lst:
break
j+=1
s+=str(j)+','
for i in lst:
s+=i+','
s+='\n'
print s
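# Illustrative run (hypothetical input): `python csver.py data.txt` where
# data.txt contains
#   a b c
#   d e
# prints comma-separated rows prefixed with a 1-based line counter:
#   1,a,b,c,
#   2,d,e,
# (each row keeps a trailing comma, and everything is printed in one go at
# the end).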
|
raviflipsyde/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/_pytest/assertion/__init__.py | 176 | """
support for presenting detailed information in failing assertions.
"""
import py
import os
import sys
from _pytest.monkeypatch import monkeypatch
from _pytest.assertion import util
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption('--assert',
action="store",
dest="assertmode",
choices=("rewrite", "reinterp", "plain",),
default="rewrite",
metavar="MODE",
help="""control assertion debugging tools. 'plain'
performs no assertion debugging. 'reinterp'
reinterprets assert statements after they failed
to provide assertion expression information.
'rewrite' (the default) rewrites assert
statements in test modules on import to
provide assert expression information. """)
group.addoption('--no-assert',
action="store_true",
default=False,
dest="noassert",
help="DEPRECATED equivalent to --assert=plain")
group.addoption('--nomagic', '--no-magic',
action="store_true",
default=False,
help="DEPRECATED equivalent to --assert=plain")
class AssertionState:
"""State for the assertion plugin."""
def __init__(self, config, mode):
self.mode = mode
self.trace = config.trace.root.get("assertion")
def pytest_configure(config):
mode = config.getvalue("assertmode")
if config.getvalue("noassert") or config.getvalue("nomagic"):
mode = "plain"
if mode == "rewrite":
try:
import ast # noqa
except ImportError:
mode = "reinterp"
else:
# Both Jython and CPython 2.6.0 have AST bugs that make the
# assertion rewriting hook malfunction.
if (sys.platform.startswith('java') or
sys.version_info[:3] == (2, 6, 0)):
mode = "reinterp"
if mode != "plain":
_load_modules(mode)
m = monkeypatch()
config._cleanup.append(m.undo)
m.setattr(py.builtin.builtins, 'AssertionError',
reinterpret.AssertionError) # noqa
hook = None
if mode == "rewrite":
hook = rewrite.AssertionRewritingHook() # noqa
sys.meta_path.insert(0, hook)
warn_about_missing_assertion(mode)
config._assertstate = AssertionState(config, mode)
config._assertstate.hook = hook
config._assertstate.trace("configured with mode set to %r" % (mode,))
def undo():
hook = config._assertstate.hook
if hook is not None and hook in sys.meta_path:
sys.meta_path.remove(hook)
config.add_cleanup(undo)
def pytest_collection(session):
# this hook is only called when test modules are collected
# so for example not in the master process of pytest-xdist
# (which does not collect test modules)
hook = session.config._assertstate.hook
if hook is not None:
hook.set_session(session)
def _running_on_ci():
"""Check if we're currently running on a CI system."""
env_vars = ['CI', 'BUILD_NUMBER']
return any(var in os.environ for var in env_vars)
def pytest_runtest_setup(item):
"""Setup the pytest_assertrepr_compare hook
The newinterpret and rewrite modules will use util._reprcompare if
it exists to use custom reporting via the
pytest_assertrepr_compare hook. This sets up this custom
comparison for the test.
"""
def callbinrepr(op, left, right):
"""Call the pytest_assertrepr_compare hook and prepare the result
This uses the first result from the hook and then ensures the
following:
* Overly verbose explanations are dropped unless -vv was used or
running on a CI.
* Embedded newlines are escaped to help util.format_explanation()
later.
* If the rewrite mode is used embedded %-characters are replaced
to protect later % formatting.
The result can be formatted by util.format_explanation() for
pretty printing.
"""
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right)
for new_expl in hook_result:
if new_expl:
if (sum(len(p) for p in new_expl[1:]) > 80*8 and
item.config.option.verbose < 2 and
not _running_on_ci()):
show_max = 10
truncated_lines = len(new_expl) - show_max
new_expl[show_max:] = [py.builtin._totext(
'Detailed information truncated (%d more lines)'
', use "-vv" to show' % truncated_lines)]
new_expl = [line.replace("\n", "\\n") for line in new_expl]
res = py.builtin._totext("\n~").join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
res = res.replace("%", "%%")
return res
util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
util._reprcompare = None
def pytest_sessionfinish(session):
hook = session.config._assertstate.hook
if hook is not None:
hook.session = None
def _load_modules(mode):
"""Lazily import assertion related code."""
global rewrite, reinterpret
from _pytest.assertion import reinterpret # noqa
if mode == "rewrite":
from _pytest.assertion import rewrite # noqa
def warn_about_missing_assertion(mode):
try:
assert False
except AssertionError:
pass
else:
if mode == "rewrite":
specifically = ("assertions which are not in test modules "
"will be ignored")
else:
specifically = "failing tests may report as passing"
sys.stderr.write("WARNING: " + specifically +
" because assert statements are not executed "
"by the underlying Python interpreter "
"(are you using python -O?)\n")
# Expose this plugin's implementation for the pytest_assertrepr_compare hook
pytest_assertrepr_compare = util.assertrepr_compare
|
hroncok/freeipa | refs/heads/master | ipatests/test_integration/env_config.py | 2 | # Authors:
# Petr Viktorin <[email protected]>
# Tomas Babej <[email protected]>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Support for configuring multihost testing via environment variables
This is here to support tests configured for Beaker,
such as the ones at https://github.com/freeipa/tests/
"""
import os
import json
import collections
from ipapython import ipautil
from ipatests.test_integration.config import Config, Domain
TESTHOST_PREFIX = 'TESTHOST_'
_SettingInfo = collections.namedtuple('Setting', 'name var_name default')
_setting_infos = (
# Directory on which test-specific files will be stored,
_SettingInfo('test_dir', 'IPATEST_DIR', '/root/ipatests'),
# File with root's private RSA key for SSH (default: ~/.ssh/id_rsa)
_SettingInfo('ssh_key_filename', 'IPA_ROOT_SSH_KEY', None),
# SSH password for root (used if root_ssh_key_filename is not set)
_SettingInfo('ssh_password', 'IPA_ROOT_SSH_PASSWORD', None),
_SettingInfo('admin_name', 'ADMINID', 'admin'),
_SettingInfo('admin_password', 'ADMINPW', 'Secret123'),
_SettingInfo('dirman_dn', 'ROOTDN', 'cn=Directory Manager'),
_SettingInfo('dirman_password', 'ROOTDNPWD', None),
# 8.8.8.8 is probably the best-known public DNS
_SettingInfo('dns_forwarder', 'DNSFORWARD', '8.8.8.8'),
_SettingInfo('nis_domain', 'NISDOMAIN', 'ipatest'),
_SettingInfo('ntp_server', 'NTPSERVER', None),
_SettingInfo('ad_admin_name', 'ADADMINID', 'Administrator'),
_SettingInfo('ad_admin_password', 'ADADMINPW', 'Secret123'),
_SettingInfo('ipv6', 'IPv6SETUP', False),
_SettingInfo('debug', 'IPADEBUG', False),
)
def get_global_config(env=None):
"""Create a test config from environment variables
If env is None, uses os.environ; otherwise env is an environment dict.
If IPATEST_YAML_CONFIG or IPATEST_JSON_CONFIG is set,
configuration is read from the named file.
For YAML, the PyYAML (python-yaml) library needs to be installed.
Otherwise, configuration is read from various curiously
named environment variables:
See _setting_infos for test-wide settings
MASTER_env1: FQDN of the master
REPLICA_env1: space-separated FQDNs of the replicas
CLIENT_env1: space-separated FQDNs of the clients
AD_env1: space-separated FQDNs of the Active Directories
OTHER_env1: space-separated FQDNs of other hosts
(same for _env2, _env3, etc)
BEAKERREPLICA1_IP_env1: IP address of replica 1 in env 1
(same for MASTER, CLIENT, or any extra defined ROLE)
For each machine that should be accessible to tests via extra roles,
the following environment variable is necessary:
TESTHOST_<role>_env1: FQDN of the machine with the extra role <role>
You can also optionally specify the IP address of the host:
BEAKER<role>_IP_env1: IP address of the machine of the extra role
The framework will try to resolve the hostname to its IP address
if not passed via this environment variable.
Also see env_normalize() for alternate variable names
"""
if env is None:
env = os.environ
env = dict(env)
return config_from_env(env)
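# A minimal sketch of the variable scheme get_global_config() understands. The
# hostnames, the extra 'legacy' role and the IP address are hypothetical and
# only illustrate the naming convention documented above; the constant is not
# used elsewhere in this module.
_EXAMPLE_ENV = {
    'MASTER_env1': 'master.ipa.test',
    'REPLICA_env1': 'replica1.ipa.test replica2.ipa.test',
    'CLIENT_env1': 'client1.ipa.test',
    'TESTHOST_LEGACY_env1': 'legacy.ipa.test',
    'BEAKERREPLICA1_IP_env1': '192.0.2.11',
}
# Feeding such a dict to get_global_config() yields a Config with one IPA
# domain ('ipa.test', derived from the master's FQDN); hosts whose IP is not
# given via a BEAKER*_IP_* variable are resolved from DNS, so real deployments
# must use resolvable names.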
def config_from_env(env):
if 'IPATEST_YAML_CONFIG' in env:
import yaml
with open(env['IPATEST_YAML_CONFIG']) as file:
confdict = yaml.safe_load(file)
return Config.from_dict(confdict)
if 'IPATEST_JSON_CONFIG' in env:
with open(env['IPATEST_JSON_CONFIG']) as file:
confdict = json.load(file)
return Config.from_dict(confdict)
env_normalize(env)
kwargs = {s.name: env.get(s.var_name, s.default)
for s in _setting_infos}
kwargs['domains'] = []
# $IPv6SETUP needs to be 'TRUE' to enable ipv6
if isinstance(kwargs['ipv6'], basestring):
kwargs['ipv6'] = (kwargs['ipv6'].upper() == 'TRUE')
config = Config(**kwargs)
# Either IPA master or AD can define a domain
domain_index = 1
while (env.get('MASTER_env%s' % domain_index) or
env.get('AD_env%s' % domain_index)):
if env.get('MASTER_env%s' % domain_index):
# IPA domain takes precedence to AD domain in case of conflict
config.domains.append(domain_from_env(env, config, domain_index,
domain_type='IPA'))
else:
config.domains.append(domain_from_env(env, config, domain_index,
domain_type='AD'))
domain_index += 1
return config
def config_to_env(config, simple=True):
"""Convert this test config into environment variables"""
try:
env = collections.OrderedDict()
except AttributeError:
# Older Python versions
env = {}
for setting in _setting_infos:
value = getattr(config, setting.name)
if value in (None, False):
env[setting.var_name] = ''
elif value is True:
env[setting.var_name] = 'TRUE'
else:
env[setting.var_name] = str(value)
for domain in config.domains:
env_suffix = '_env%s' % (config.domains.index(domain) + 1)
env['DOMAIN%s' % env_suffix] = domain.name
env['RELM%s' % env_suffix] = domain.realm
env['BASEDN%s' % env_suffix] = str(domain.basedn)
for role in domain.roles:
hosts = domain.hosts_by_role(role)
prefix = ('' if role in domain.static_roles
else TESTHOST_PREFIX)
hostnames = ' '.join(h.hostname for h in hosts)
env['%s%s%s' % (prefix, role.upper(), env_suffix)] = hostnames
ext_hostnames = ' '.join(h.external_hostname for h in hosts)
env['BEAKER%s%s' % (role.upper(), env_suffix)] = ext_hostnames
ips = ' '.join(h.ip for h in hosts)
env['BEAKER%s_IP%s' % (role.upper(), env_suffix)] = ips
for i, host in enumerate(hosts, start=1):
suffix = '%s%s' % (role.upper(), i)
prefix = ('' if role in domain.static_roles
else TESTHOST_PREFIX)
ext_hostname = host.external_hostname
env['%s%s%s' % (prefix, suffix,
env_suffix)] = host.hostname
env['BEAKER%s%s' % (suffix, env_suffix)] = ext_hostname
env['BEAKER%s_IP%s' % (suffix, env_suffix)] = host.ip
if simple:
        # Define simple variables (without the _env<NUM> suffix) for
        # backwards compatibility with older tests.
if config.domains:
default_domain = config.domains[0]
if default_domain.master:
env['MASTER'] = default_domain.master.hostname
env['BEAKERMASTER'] = default_domain.master.external_hostname
env['MASTERIP'] = default_domain.master.ip
if default_domain.replicas:
env['SLAVE'] = env['REPLICA'] = env['REPLICA_env1']
env['BEAKERSLAVE'] = env['BEAKERREPLICA_env1']
env['SLAVEIP'] = env['BEAKERREPLICA_IP_env1']
if default_domain.clients:
client = default_domain.clients[0]
env['CLIENT'] = client.hostname
env['BEAKERCLIENT'] = client.external_hostname
if len(default_domain.clients) >= 2:
client = default_domain.clients[1]
env['CLIENT2'] = client.hostname
env['BEAKERCLIENT2'] = client.external_hostname
return env
def env_normalize(env):
"""Fill env variables from alternate variable names
MASTER_env1 <- MASTER
REPLICA_env1 <- REPLICA, SLAVE
CLIENT_env1 <- CLIENT
similarly for BEAKER* variants: BEAKERMASTER1_env1 <- BEAKERMASTER, etc.
CLIENT_env1 gets extended with CLIENT2 or CLIENT2_env1
"""
def coalesce(name, *other_names):
"""If name is not set, set it to first existing env[other_name]"""
if name not in env:
for other_name in other_names:
try:
env[name] = env[other_name]
except KeyError:
pass
else:
return
else:
env[name] = ''
coalesce('MASTER_env1', 'MASTER')
coalesce('REPLICA_env1', 'REPLICA', 'SLAVE')
coalesce('CLIENT_env1', 'CLIENT')
coalesce('BEAKERMASTER1_env1', 'BEAKERMASTER')
coalesce('BEAKERREPLICA1_env1', 'BEAKERREPLICA', 'BEAKERSLAVE')
coalesce('BEAKERCLIENT1_env1', 'BEAKERCLIENT')
def extend(name, name2):
value = env.get(name2)
if value and value not in env[name].split(' '):
env[name] += ' ' + value
extend('CLIENT_env1', 'CLIENT2')
extend('CLIENT_env1', 'CLIENT2_env1')
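# A hypothetical before/after illustration of the coalescing above: given
#
#   {'MASTER': 'master.ipa.test', 'SLAVE': 'replica1.ipa.test',
#    'CLIENT': 'client1.ipa.test', 'CLIENT2': 'client2.ipa.test'}
#
# env_normalize() adds
#
#   MASTER_env1  = 'master.ipa.test'
#   REPLICA_env1 = 'replica1.ipa.test'
#   CLIENT_env1  = 'client1.ipa.test client2.ipa.test'
#
# plus empty-string placeholders for the BEAKER* aliases that were not
# supplied.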
def domain_from_env(env, config, index, domain_type):
# Roles available in the domain depend on the type of the domain
# Unix machines are added only to the IPA domains, Windows machines
# only to the AD domains
if domain_type == 'IPA':
master_role = 'MASTER'
else:
master_role = 'AD'
env_suffix = '_env%s' % index
master_env = '%s%s' % (master_role, env_suffix)
hostname, dot, domain_name = env[master_env].partition('.')
domain = Domain(config, domain_name, domain_type)
for role in _roles_from_env(domain, env, env_suffix):
prefix = '' if role in domain.static_roles else TESTHOST_PREFIX
value = env.get('%s%s%s' % (prefix, role.upper(), env_suffix), '')
for host_index, hostname in enumerate(value.split(), start=1):
host = host_from_env(env, domain, hostname, role,
host_index, index)
domain.hosts.append(host)
if not domain.hosts:
raise ValueError('No hosts defined for %s' % env_suffix)
return domain
def _roles_from_env(domain, env, env_suffix):
for role in domain.static_roles:
yield role
# Extra roles are defined via env variables of form TESTHOST_key_envX
roles = set()
for var in sorted(env):
if var.startswith(TESTHOST_PREFIX) and var.endswith(env_suffix):
variable_split = var.split('_')
role_name = '_'.join(variable_split[1:-1])
if (role_name and not role_name[-1].isdigit()):
roles.add(role_name.lower())
for role in sorted(roles):
yield role
def domain_to_env(domain, **kwargs):
"""Return environment variables specific to this domain"""
env = domain.config.to_env(**kwargs)
env['DOMAIN'] = domain.name
env['RELM'] = domain.realm
env['BASEDN'] = str(domain.basedn)
return env
def host_from_env(env, domain, hostname, role, index, domain_index):
ip = env.get('BEAKER%s%s_IP_env%s' %
(role.upper(), index, domain_index), None)
external_hostname = env.get(
'BEAKER%s%s_env%s' % (role.upper(), index, domain_index), None)
cls = domain.get_host_class({})
return cls(domain, hostname, role, ip, external_hostname)
def host_to_env(host, **kwargs):
"""Return environment variables specific to this host"""
env = host.domain.to_env(**kwargs)
index = host.domain.hosts.index(host) + 1
domain_index = host.config.domains.index(host.domain) + 1
role = host.role.upper()
if host.role != 'master':
role += str(index)
env['MYHOSTNAME'] = host.hostname
env['MYBEAKERHOSTNAME'] = host.external_hostname
env['MYIP'] = host.ip
prefix = ('' if host.role in host.domain.static_roles
else TESTHOST_PREFIX)
env_suffix = '_env%s' % domain_index
env['MYROLE'] = '%s%s%s' % (prefix, role, env_suffix)
env['MYENV'] = str(domain_index)
return env
def env_to_script(env):
return ''.join(['export %s=%s\n' % (key, ipautil.shell_quote(value))
for key, value in env.items()])
|
catapult-project/catapult-csm | refs/heads/master | third_party/gsutil/third_party/pyasn1/pyasn1/type/namedtype.py | 200 | # NamedType specification for constructed types
import sys
from pyasn1.type import tagmap
from pyasn1 import error
class NamedType:
isOptional = 0
isDefaulted = 0
def __init__(self, name, t):
self.__name = name; self.__type = t
def __repr__(self): return '%s(%s, %s)' % (
self.__class__.__name__, self.__name, self.__type
)
def getType(self): return self.__type
def getName(self): return self.__name
def __getitem__(self, idx):
if idx == 0: return self.__name
if idx == 1: return self.__type
raise IndexError()
class OptionalNamedType(NamedType):
isOptional = 1
class DefaultedNamedType(NamedType):
isDefaulted = 1
class NamedTypes:
def __init__(self, *namedTypes):
self.__namedTypes = namedTypes
self.__namedTypesLen = len(self.__namedTypes)
self.__minTagSet = None
self.__tagToPosIdx = {}; self.__nameToPosIdx = {}
self.__tagMap = { False: None, True: None }
self.__ambigiousTypes = {}
def __repr__(self):
r = '%s(' % self.__class__.__name__
for n in self.__namedTypes:
r = r + '%r, ' % (n,)
return r + ')'
def __getitem__(self, idx): return self.__namedTypes[idx]
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self.__namedTypesLen)
else:
def __bool__(self): return bool(self.__namedTypesLen)
def __len__(self): return self.__namedTypesLen
def getTypeByPosition(self, idx):
if idx < 0 or idx >= self.__namedTypesLen:
raise error.PyAsn1Error('Type position out of range')
else:
return self.__namedTypes[idx].getType()
def getPositionByType(self, tagSet):
if not self.__tagToPosIdx:
idx = self.__namedTypesLen
while idx > 0:
idx = idx - 1
tagMap = self.__namedTypes[idx].getType().getTagMap()
for t in tagMap.getPosMap():
if t in self.__tagToPosIdx:
raise error.PyAsn1Error('Duplicate type %s' % (t,))
self.__tagToPosIdx[t] = idx
try:
return self.__tagToPosIdx[tagSet]
except KeyError:
raise error.PyAsn1Error('Type %s not found' % (tagSet,))
def getNameByPosition(self, idx):
try:
return self.__namedTypes[idx].getName()
except IndexError:
raise error.PyAsn1Error('Type position out of range')
def getPositionByName(self, name):
if not self.__nameToPosIdx:
idx = self.__namedTypesLen
while idx > 0:
idx = idx - 1
n = self.__namedTypes[idx].getName()
if n in self.__nameToPosIdx:
raise error.PyAsn1Error('Duplicate name %s' % (n,))
self.__nameToPosIdx[n] = idx
try:
return self.__nameToPosIdx[name]
except KeyError:
raise error.PyAsn1Error('Name %s not found' % (name,))
def __buildAmbigiousTagMap(self):
ambigiousTypes = ()
idx = self.__namedTypesLen
while idx > 0:
idx = idx - 1
t = self.__namedTypes[idx]
if t.isOptional or t.isDefaulted:
ambigiousTypes = (t, ) + ambigiousTypes
else:
ambigiousTypes = (t, )
self.__ambigiousTypes[idx] = NamedTypes(*ambigiousTypes)
def getTagMapNearPosition(self, idx):
if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
try:
return self.__ambigiousTypes[idx].getTagMap()
except KeyError:
raise error.PyAsn1Error('Type position out of range')
def getPositionNearType(self, tagSet, idx):
if not self.__ambigiousTypes: self.__buildAmbigiousTagMap()
try:
return idx+self.__ambigiousTypes[idx].getPositionByType(tagSet)
except KeyError:
raise error.PyAsn1Error('Type position out of range')
def genMinTagSet(self):
if self.__minTagSet is None:
for t in self.__namedTypes:
__type = t.getType()
tagSet = getattr(__type,'getMinTagSet',__type.getTagSet)()
if self.__minTagSet is None or tagSet < self.__minTagSet:
self.__minTagSet = tagSet
return self.__minTagSet
def getTagMap(self, uniq=False):
if self.__tagMap[uniq] is None:
tagMap = tagmap.TagMap()
for nt in self.__namedTypes:
tagMap = tagMap.clone(
nt.getType(), nt.getType().getTagMap(), uniq
)
self.__tagMap[uniq] = tagMap
return self.__tagMap[uniq]
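# Typical usage sketch (kept as a comment, since the rest of pyasn1.type
# imports this module): NamedTypes describes the components of a constructed
# ASN.1 type, e.g.
#
#   from pyasn1.type import univ, namedtype
#
#   class Record(univ.Sequence):
#       componentType = namedtype.NamedTypes(
#           namedtype.NamedType('id', univ.Integer()),
#           namedtype.OptionalNamedType('note', univ.OctetString()),
#           namedtype.DefaultedNamedType('version', univ.Integer(0))
#       )
#
# The BER/DER codecs then rely on getPositionByName()/getTypeByPosition() and
# the tag maps built above to match encountered tags to these components.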
|
FireBladeNooT/Medusa_1_6 | refs/heads/master | lib/unidecode/x05c.py | 252 | data = (
'Po ', # 0x00
'Feng ', # 0x01
'Zhuan ', # 0x02
'Fu ', # 0x03
'She ', # 0x04
'Ke ', # 0x05
'Jiang ', # 0x06
'Jiang ', # 0x07
'Zhuan ', # 0x08
'Wei ', # 0x09
'Zun ', # 0x0a
'Xun ', # 0x0b
'Shu ', # 0x0c
'Dui ', # 0x0d
'Dao ', # 0x0e
'Xiao ', # 0x0f
'Ji ', # 0x10
'Shao ', # 0x11
'Er ', # 0x12
'Er ', # 0x13
'Er ', # 0x14
'Ga ', # 0x15
'Jian ', # 0x16
'Shu ', # 0x17
'Chen ', # 0x18
'Shang ', # 0x19
'Shang ', # 0x1a
'Mo ', # 0x1b
'Ga ', # 0x1c
'Chang ', # 0x1d
'Liao ', # 0x1e
'Xian ', # 0x1f
'Xian ', # 0x20
'[?] ', # 0x21
'Wang ', # 0x22
'Wang ', # 0x23
'You ', # 0x24
'Liao ', # 0x25
'Liao ', # 0x26
'Yao ', # 0x27
'Mang ', # 0x28
'Wang ', # 0x29
'Wang ', # 0x2a
'Wang ', # 0x2b
'Ga ', # 0x2c
'Yao ', # 0x2d
'Duo ', # 0x2e
'Kui ', # 0x2f
'Zhong ', # 0x30
'Jiu ', # 0x31
'Gan ', # 0x32
'Gu ', # 0x33
'Gan ', # 0x34
'Tui ', # 0x35
'Gan ', # 0x36
'Gan ', # 0x37
'Shi ', # 0x38
'Yin ', # 0x39
'Chi ', # 0x3a
'Kao ', # 0x3b
'Ni ', # 0x3c
'Jin ', # 0x3d
'Wei ', # 0x3e
'Niao ', # 0x3f
'Ju ', # 0x40
'Pi ', # 0x41
'Ceng ', # 0x42
'Xi ', # 0x43
'Bi ', # 0x44
'Ju ', # 0x45
'Jie ', # 0x46
'Tian ', # 0x47
'Qu ', # 0x48
'Ti ', # 0x49
'Jie ', # 0x4a
'Wu ', # 0x4b
'Diao ', # 0x4c
'Shi ', # 0x4d
'Shi ', # 0x4e
'Ping ', # 0x4f
'Ji ', # 0x50
'Xie ', # 0x51
'Chen ', # 0x52
'Xi ', # 0x53
'Ni ', # 0x54
'Zhan ', # 0x55
'Xi ', # 0x56
'[?] ', # 0x57
'Man ', # 0x58
'E ', # 0x59
'Lou ', # 0x5a
'Ping ', # 0x5b
'Ti ', # 0x5c
'Fei ', # 0x5d
'Shu ', # 0x5e
'Xie ', # 0x5f
'Tu ', # 0x60
'Lu ', # 0x61
'Lu ', # 0x62
'Xi ', # 0x63
'Ceng ', # 0x64
'Lu ', # 0x65
'Ju ', # 0x66
'Xie ', # 0x67
'Ju ', # 0x68
'Jue ', # 0x69
'Liao ', # 0x6a
'Jue ', # 0x6b
'Shu ', # 0x6c
'Xi ', # 0x6d
'Che ', # 0x6e
'Tun ', # 0x6f
'Ni ', # 0x70
'Shan ', # 0x71
'[?] ', # 0x72
'Xian ', # 0x73
'Li ', # 0x74
'Xue ', # 0x75
'Nata ', # 0x76
'[?] ', # 0x77
'Long ', # 0x78
'Yi ', # 0x79
'Qi ', # 0x7a
'Ren ', # 0x7b
'Wu ', # 0x7c
'Han ', # 0x7d
'Shen ', # 0x7e
'Yu ', # 0x7f
'Chu ', # 0x80
'Sui ', # 0x81
'Qi ', # 0x82
'[?] ', # 0x83
'Yue ', # 0x84
'Ban ', # 0x85
'Yao ', # 0x86
'Ang ', # 0x87
'Ya ', # 0x88
'Wu ', # 0x89
'Jie ', # 0x8a
'E ', # 0x8b
'Ji ', # 0x8c
'Qian ', # 0x8d
'Fen ', # 0x8e
'Yuan ', # 0x8f
'Qi ', # 0x90
'Cen ', # 0x91
'Qian ', # 0x92
'Qi ', # 0x93
'Cha ', # 0x94
'Jie ', # 0x95
'Qu ', # 0x96
'Gang ', # 0x97
'Xian ', # 0x98
'Ao ', # 0x99
'Lan ', # 0x9a
'Dao ', # 0x9b
'Ba ', # 0x9c
'Zuo ', # 0x9d
'Zuo ', # 0x9e
'Yang ', # 0x9f
'Ju ', # 0xa0
'Gang ', # 0xa1
'Ke ', # 0xa2
'Gou ', # 0xa3
'Xue ', # 0xa4
'Bei ', # 0xa5
'Li ', # 0xa6
'Tiao ', # 0xa7
'Ju ', # 0xa8
'Yan ', # 0xa9
'Fu ', # 0xaa
'Xiu ', # 0xab
'Jia ', # 0xac
'Ling ', # 0xad
'Tuo ', # 0xae
'Pei ', # 0xaf
'You ', # 0xb0
'Dai ', # 0xb1
'Kuang ', # 0xb2
'Yue ', # 0xb3
'Qu ', # 0xb4
'Hu ', # 0xb5
'Po ', # 0xb6
'Min ', # 0xb7
'An ', # 0xb8
'Tiao ', # 0xb9
'Ling ', # 0xba
'Chi ', # 0xbb
'Yuri ', # 0xbc
'Dong ', # 0xbd
'Cem ', # 0xbe
'Kui ', # 0xbf
'Xiu ', # 0xc0
'Mao ', # 0xc1
'Tong ', # 0xc2
'Xue ', # 0xc3
'Yi ', # 0xc4
'Kura ', # 0xc5
'He ', # 0xc6
'Ke ', # 0xc7
'Luo ', # 0xc8
'E ', # 0xc9
'Fu ', # 0xca
'Xun ', # 0xcb
'Die ', # 0xcc
'Lu ', # 0xcd
'An ', # 0xce
'Er ', # 0xcf
'Gai ', # 0xd0
'Quan ', # 0xd1
'Tong ', # 0xd2
'Yi ', # 0xd3
'Mu ', # 0xd4
'Shi ', # 0xd5
'An ', # 0xd6
'Wei ', # 0xd7
'Hu ', # 0xd8
'Zhi ', # 0xd9
'Mi ', # 0xda
'Li ', # 0xdb
'Ji ', # 0xdc
'Tong ', # 0xdd
'Wei ', # 0xde
'You ', # 0xdf
'Sang ', # 0xe0
'Xia ', # 0xe1
'Li ', # 0xe2
'Yao ', # 0xe3
'Jiao ', # 0xe4
'Zheng ', # 0xe5
'Luan ', # 0xe6
'Jiao ', # 0xe7
'E ', # 0xe8
'E ', # 0xe9
'Yu ', # 0xea
'Ye ', # 0xeb
'Bu ', # 0xec
'Qiao ', # 0xed
'Qun ', # 0xee
'Feng ', # 0xef
'Feng ', # 0xf0
'Nao ', # 0xf1
'Li ', # 0xf2
'You ', # 0xf3
'Xian ', # 0xf4
'Hong ', # 0xf5
'Dao ', # 0xf6
'Shen ', # 0xf7
'Cheng ', # 0xf8
'Tu ', # 0xf9
'Geng ', # 0xfa
'Jun ', # 0xfb
'Hao ', # 0xfc
'Xia ', # 0xfd
'Yin ', # 0xfe
'Yu ', # 0xff
)
|
asa1253/portia | refs/heads/master | slybot/slybot/linkextractor/ecsv.py | 9 | import csv
from cStringIO import StringIO
from scrapy.link import Link
from .base import BaseLinkExtractor
# see http://docs.python.org/2/library/csv.html#csv-fmt-params
_FORMAT_PARAMETERS = (
('delimiter', ','),
('quotechar', '"'),
('doublequote', True),
('escapechar', None),
('lineterminator', '\r\n'),
('skipinitialspace', False),
('strict', False),
)
class CsvLinkExtractor(BaseLinkExtractor):
def __init__(self, column=0, **kwargs):
self.fmtparams = dict((key, kwargs.pop(key, default)) for key, default in _FORMAT_PARAMETERS)
for key, val in self.fmtparams.items():
if isinstance(val, unicode):
self.fmtparams[key] = val.encode()
super(CsvLinkExtractor, self).__init__(**kwargs)
self.allowed_schemes = filter(lambda x: x and isinstance(x, basestring), self.allowed_schemes)
self.column = column
def _extract_links(self, response):
buff = StringIO(response.body)
reader = csv.reader(buff, **self.fmtparams)
for row in reader:
if len(row) > self.column:
yield Link(row[self.column])
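# Rough usage sketch (as a comment; ``response`` stands for a Scrapy response
# object and the values are hypothetical): pull links from the second column
# of a semicolon-delimited feed.
#
#   extractor = CsvLinkExtractor(column=1, delimiter=';')
#   links = list(extractor._extract_links(response))
#
# Any format parameter listed in _FORMAT_PARAMETERS can be overridden the same
# way; remaining keyword arguments are passed through to
# BaseLinkExtractor.__init__().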
|
niaquinto/AdvisorTool | refs/heads/master | server.py | 1 | #!/usr/bin/python3
import cherrypy
import app
def run_prod_server(app):
# Mount the WSGI callable object (app) on the root directory
cherrypy.tree.graft(app, '/')
# Set the configuration of the web server
cherrypy.config.update({
'engine.autoreload_on': True,
'server.socket_port': 8080,
'server.socket_host': '0.0.0.0'
})
# Start the CherryPy WSGI web server
cherrypy.engine.start()
cherrypy.engine.block()
def run_dev_server(app):
# Start built-in Werkzeug development server
app.run(host='0.0.0.0', port=8080)
if __name__ == '__main__':
if app.app.config['DEBUG']:
run_dev_server(app.app)
else:
run_prod_server(app.app)
|
Fulla/django-nestedgroupedlists | refs/heads/master | example/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
nanolearningllc/edx-platform-cypress-2 | refs/heads/master | lms/djangoapps/instructor_task/migrations/0001_initial.py | 114 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'InstructorTask'
db.create_table('instructor_task_instructortask', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_input', self.gf('django.db.models.fields.CharField')(max_length=255)),
('task_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_state', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, db_index=True)),
('task_output', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('instructor_task', ['InstructorTask'])
def backwards(self, orm):
# Deleting model 'InstructorTask'
db.delete_table('instructor_task_instructortask')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'instructor_task.instructortask': {
'Meta': {'object_name': 'InstructorTask'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['instructor_task']
|
dogecoin/dogecoin | refs/heads/master | qa/rpc-tests/p2p-segwit.py | 20 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
'''
SegWit p2p test.
'''
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
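# Worked example with made-up sizes: for a block that serializes to 1000 bytes
# without witness data and 1400 bytes with it, the witness portion is 400
# bytes and the virtual size is 1000 + 400/4 = 1100, which is exactly what
# int((3*1000 + 1400 + 3)/4) returns (the "+3" rounds any fraction up).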
# Note: we can reduce code by using SingleNodeConnCB (in master, not 0.12)
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.sleep_time = 0.05
        self.getdataset = set()
        self.last_reject = None
        # Track the most recently received message of each type so the
        # wait_for_* helpers below can be polled even before any such
        # message has arrived.
        self.last_block = None
        self.last_inv = None
        self.last_getdata = None
        self.last_getheaders = None
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_pong(self, conn, message):
self.last_pong = message
def on_reject(self, conn, message):
self.last_reject = message
#print (message)
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
return
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_getdata != None
self.sync(test_function, timeout)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
self.sync(test_function, timeout)
def wait_for_inv(self, expected_inv, timeout=60):
test_function = lambda: self.last_inv != expected_inv
self.sync(test_function, timeout)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_getdata = None
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
return
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_getdata = None
self.last_getheaders = None
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
return
def announce_block(self, block, use_header):
with mininode_lock:
self.last_getdata = None
if use_header:
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_block = None
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_reject.reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1"]))
# Start a node for testing IsStandard rules.
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1", "-acceptnonstdtxn=0"]))
connect_nodes(self.nodes[0], 1)
# Disable segwit's bip9 parameter to simulate upgrading after activation.
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-bip9params=segwit:0:0"]))
connect_nodes(self.nodes[0], 2)
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=4):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
print("\tVerifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
print("\tTesting non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
print("\tTesting behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_reject.reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
print("\tTesting witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
print("\tTesting witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
print("\tTesting witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
print("\tTesting extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
print("\tTesting maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Can create witness outputs that are long, but can't be greater than
# 10k bytes to successfully spend
print("\tTesting maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
print("\tTesting witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
        # not be added to the recently-rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_getdata.inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
print("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
        # Delivering this transaction with witness should fail (no matter who
        # it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
        # Add a witness that is too large for IsStandard and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
        # The node should not be blinded to the transaction: it should still request it again
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv(CInv(1, tx2.sha256)) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv(CInv(1, tx3.sha256))
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
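        # Virtual size (BIP141) is ceil(weight/4), where weight = 3*stripped_size + total_size;
        # the +3 below implements the ceiling.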
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
print("\tTesting block relay")
blocktype = 2|MSG_WITNESS_FLAG
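        # Inv/getdata type 2 is MSG_BLOCK; OR'ing in MSG_WITNESS_FLAG (bit 30) requests the
        # block with its witness data, per BIP144.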
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
block2 = self.build_next_block(nVersion=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
        if not segwit_activated:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
            # Pick 10 random blocks on the main chain, and verify that the
            # results of getdata for MSG_BLOCK and MSG_WITNESS_BLOCK, and of
            # rpc getblock(), are all equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
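            # Block weight (BIP141) = 3*base (stripped) size + total size, capped by
            # consensus at 4,000,000.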
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
        # Upgraded node should not ask for blocks from unupgraded peers
block4 = self.build_next_block(nVersion=4)
block4.solve()
self.old_node.getdataset = set()
        # Blocks can be requested via direct-fetch (immediately upon processing the announcement)
        # or via parallel download (with an indeterminate delay from processing the announcement),
        # so to test that a block is NOT requested we could guess a time period to sleep for
        # and then check. We can avoid the sleep() by taking advantage of transaction getdatas
        # being processed after block getdatas: announce a transaction as well,
        # and then check whether that particular getdata has been received.
self.old_node.announce_block(block4, use_header=False)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
print("\tTesting standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
print("\tTesting standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
        NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
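        # OP_0 is tested last so that the loop leaves `tx` paying to a version-0 program,
        # which the next step spends into a version-1 output to check v0 -> v1 standardness.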
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_reject.reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
print("\tTesting premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
print("\tTesting segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
print("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxos
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
print("\tTesting P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
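        # P2SH-P2WSH: the redeem script pushed in the scriptSig is itself the version-0
        # witness program (OP_0 <sha256(witness script)>).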
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
        # segwit activation. Note that older bitcoinds that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node, node_id):
print("\tTesting software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
stop_node(node, node_id)
self.nodes[node_id] = start_node(node_id, self.options.tmpdir, ["-debug"])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(node, 'segwit')['status'] == "active")
        # Make sure this peer's blocks match those of node0.
height = node.getblockcount()
while height >= 0:
block_hash = node.getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), node.getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
print("\tTesting sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
sigops_per_script = 20*5 + 193*1
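        # An OP_CHECKMULTISIG not preceded by a key-count push counts as 20 sigops and each
        # OP_CHECKSIG as 1, so every spend of this program contributes 20*5 + 193 = 293 sigops.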
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
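        # Witness sigops count once each toward the MAX_SIGOP_COST block limit, so
        # MAX_SIGOP_COST // sigops_per_script spends fit; extra_sigops_available is the
        # leftover budget used by the two extra outputs below.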
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
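        # Sigops in a non-witness scriptPubKey are scaled by the witness factor (4x) toward
        # the sigop-cost limit, hence the division by 4; the +1 pushes the block just over.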
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
print("\tTesting getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# If we're not indicating segwit support, we will still be
# signalling for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# TODO: this duplicates some code from blocktools.py, would be nice
# to refactor.
# Check that default_witness_commitment is present.
block = CBlock()
witness_root = block.get_merkle_root([ser_uint256(0), ser_uint256(txid)])
check_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(0)))
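                # The default commitment is hash256(witness merkle root || witness nonce);
                # the coinbase wtxid is defined to be 0 and the nonce here is also 0.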
from test_framework.blocktools import WITNESS_COMMITMENT_HEADER
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(check_commitment)
script = CScript([OP_RETURN, output_data])
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
print("\tTesting uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
print("\tTesting detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
print("\nStarting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
print("\nTesting behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
print("\nTesting behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(self.nodes[2], 2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
|
nwalters512/the-blue-alliance | refs/heads/master | helpers/notification_helper.py | 4 | import datetime
import json
from consts.client_type import ClientType
from consts.notification_type import NotificationType
from controllers.api.api_status_controller import ApiStatusController
from helpers.push_helper import PushHelper
from models.event import Event
from models.sitevar import Sitevar
from notifications.alliance_selections import AllianceSelectionNotification
from notifications.level_starting import CompLevelStartingNotification
from notifications.broadcast import BroadcastNotification
from notifications.match_score import MatchScoreNotification
from notifications.match_video import MatchVideoNotification, EventMatchVideoNotification
from notifications.awards_updated import AwardsUpdatedNotification
from notifications.schedule_updated import ScheduleUpdatedNotification
from notifications.upcoming_match import UpcomingMatchNotification
from notifications.update_favorites import UpdateFavoritesNotification
from notifications.update_subscriptions import UpdateSubscriptionsNotification
from notifications.verification import VerificationNotification
class NotificationHelper(object):
"""
Helper class for sending push notifications.
    Methods here should build a Notification object and call its send method.
"""
@classmethod
def send_match_score_update(cls, match):
users = PushHelper.get_users_subscribed_to_match(match, NotificationType.MATCH_SCORE)
keys = PushHelper.get_client_ids_for_users(users)
notification = MatchScoreNotification(match)
notification.send(keys)
@classmethod
def send_favorite_update(cls, user_id, sending_device_key=""):
clients = PushHelper.get_client_ids_for_users([user_id])
notification = UpdateFavoritesNotification(user_id, sending_device_key)
notification.send(clients)
@classmethod
def send_subscription_update(cls, user_id, sending_device_key=""):
clients = PushHelper.get_client_ids_for_users([user_id])
notification = UpdateSubscriptionsNotification(user_id, sending_device_key)
notification.send(clients)
@classmethod
def send_upcoming_match_notification(cls, match, event):
users = PushHelper.get_users_subscribed_to_match(match, NotificationType.UPCOMING_MATCH)
keys = PushHelper.get_client_ids_for_users(users)
if match.set_number == 1 and match.match_number == 1:
# First match of a new type, send level starting notifications
start_users = PushHelper.get_users_subscribed_to_match(match, NotificationType.LEVEL_STARTING)
start_keys = PushHelper.get_client_ids_for_users(start_users)
level_start = CompLevelStartingNotification(match, event)
level_start.send(start_keys)
# Send upcoming match notification
notification = UpcomingMatchNotification(match, event)
notification.send(keys)
match.push_sent = True # Make sure we don't send updates for this match again
match.dirty = True
from helpers.match_manipulator import MatchManipulator
MatchManipulator.createOrUpdate(match)
@classmethod
def send_upcoming_matches(cls, live_events):
from helpers.match_helper import MatchHelper # PJL: Hacky :P
# Causes circular import, otherwise
# https://github.com/the-blue-alliance/the-blue-alliance/pull/1098#discussion_r25128966
down_events = []
now = datetime.datetime.utcnow()
for event in live_events:
matches = event.matches
if not matches:
continue
last_matches = MatchHelper.recentMatches(matches, num=1)
next_matches = MatchHelper.upcomingMatches(matches, num=2)
# First, compare the difference between scheduled times of next/last match
# Send an upcoming notification if it's <10 minutes, to account for events ahead of schedule
if last_matches != []:
last_match = last_matches[0]
for i, next_match in enumerate(next_matches):
if not next_match.push_sent and last_match.time and next_match.time:
diff = next_match.time - last_match.time
if diff < datetime.timedelta(minutes=10 * (i + 1)):
cls.send_upcoming_match_notification(next_match, event)
for match in next_matches:
if match and not match.push_sent:
# Only continue sending for the next match if a push hasn't already been sent for it
if match.time is None or match.time + datetime.timedelta(minutes=-7) <= now:
# Only send notifications for matches no more than 7 minutes (average-ish match cycle time) before it's scheduled to start
                        # Unless the match has no time info. Then #yolo and send it
cls.send_upcoming_match_notification(match, event)
# Determine if event is down
if cls.is_event_down(last_matches[0] if last_matches else None, next_matches[0] if next_matches else None):
down_events.append(event.key_name)
# Update the status sitevar
status_sitevar = Sitevar.get_by_id('apistatus.down_events')
if status_sitevar is None:
status_sitevar = Sitevar(id="apistatus.down_events", description="A list of down event keys", values_json="[]")
old_status = status_sitevar.contents
status_sitevar.contents = down_events
status_sitevar.put()
# Clear API Response cache
ApiStatusController.clear_cache_if_needed(old_status, down_events)
@classmethod
def send_schedule_update(cls, event):
users = PushHelper.get_users_subscribed_to_event(event, NotificationType.SCHEDULE_UPDATED)
keys = PushHelper.get_client_ids_for_users(users)
notification = ScheduleUpdatedNotification(event)
notification.send(keys)
@classmethod
def send_alliance_update(cls, event):
users = PushHelper.get_users_subscribed_for_alliances(event, NotificationType.ALLIANCE_SELECTION)
keys = PushHelper.get_client_ids_for_users(users)
notification = AllianceSelectionNotification(event)
notification.send(keys)
@classmethod
def send_award_update(cls, event):
users = PushHelper.get_users_subscribed_to_event(event, NotificationType.AWARDS)
keys = PushHelper.get_client_ids_for_users(users)
notification = AwardsUpdatedNotification(event)
notification.send(keys)
@classmethod
def send_match_video(cls, match):
"""
Sends match_video and event_match_video notifications
If the match is current, MatchVideoNotification is sent.
Otherwise, EventMatchVideoNotification is sent
"""
match_users = set(PushHelper.get_users_subscribed_to_match(match, NotificationType.MATCH_VIDEO))
event_users = set(PushHelper.get_users_subscribed_to_event(match.event.get(), NotificationType.MATCH_VIDEO))
users = match_users.union(event_users)
if match.within_seconds(60*10):
user_keys = PushHelper.get_client_ids_for_users(users)
MatchVideoNotification(match).send(user_keys)
else:
user_keys = PushHelper.get_client_ids_for_users(users)
EventMatchVideoNotification(match).send(user_keys)
@classmethod
def send_broadcast(cls, client_types, title, message, url, app_version=''):
users = PushHelper.get_all_mobile_clients(client_types)
keys = PushHelper.get_client_ids_for_users(users)
notification = BroadcastNotification(title, message, url, app_version)
notification.send(keys)
@classmethod
def verify_webhook(cls, url, secret):
key = {ClientType.WEBHOOK: [(url, secret)]}
notification = VerificationNotification(url, secret)
notification.send(key)
return notification.verification_key
@classmethod
def is_event_down(cls, last_match, next_match):
"""
Determines if an event's reporting is "down".
Conditions should be pretty tight, don't want false positives
Both next and last match need to be on the same day.
Use scheduled/actual start times for last to determine schedule offset
After max(predicted, scheduled) for next + threshold, event is down
"""
if not last_match or not next_match:
return False
if not last_match.time or not last_match.actual_time or not next_match.time:
# Don't cause false positives when we're missing data
return False
if not last_match.time.day == next_match.time.day:
# Events are on different days, all bets are off
return False
now = datetime.datetime.utcnow()
threshold = datetime.timedelta(minutes=30) # we can tune this
schedule_offset = last_match.actual_time - last_match.time
predicted_start = max(next_match.time + schedule_offset, next_match.time)
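        # Example: if the last match started 20 minutes behind schedule, the next match is
        # only considered down once we are more than 20 + 30 minutes past its scheduled start.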
if now > predicted_start + threshold:
# Event is down :(
return True
return False
|
invenia/iterparse | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
from setuptools import setup, find_packages
import re
package_name = 'iterparse'
version = ''
with open('{}/__init__.py'.format(package_name), 'r') as f:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*?)[\'"]',
f.read(), re.MULTILINE,
).group(1)
if not version:
raise RuntimeError('Cannot find version information')
setup(
name=package_name,
version=version,
packages=find_packages(),
install_requires=(
'lxml',
),
)
|
vrv/tensorflow | refs/heads/master | tensorflow/python/tools/strip_unused.py | 180 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes unneeded nodes from a GraphDef file.
This script is designed to help streamline models, by taking the input and
output nodes that will be used by an application and figuring out the smallest
set of operations that are required to run for those arguments. The resulting
minimal graph is then saved out.
The advantages of running this script are:
- You may be able to shrink the file size.
- Operations that are unsupported on your platform but still present can be
safely removed.
The resulting graph may not be as flexible as the original though, since any
input nodes that weren't explicitly mentioned may not be accessible any more.
An example of command-line usage is:
bazel build tensorflow/python/tools:strip_unused && \
bazel-bin/tensorflow/python/tools/strip_unused \
--input_graph=some_graph_def.pb \
--output_graph=/tmp/stripped_graph.pb \
--input_node_names=input0 \
--output_node_names=softmax
You can also look at strip_unused_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import app
from tensorflow.python.tools import strip_unused_lib
FLAGS = None
def main(unused_args):
strip_unused_lib.strip_unused_from_files(FLAGS.input_graph,
FLAGS.input_binary,
FLAGS.output_graph,
FLAGS.output_binary,
FLAGS.input_node_names,
FLAGS.output_node_names,
FLAGS.placeholder_type_enum)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--input_graph',
type=str,
default='',
help='TensorFlow \'GraphDef\' file to load.')
parser.add_argument(
'--input_binary',
nargs='?',
const=True,
type='bool',
default=False,
help='Whether the input files are in binary format.')
parser.add_argument(
'--output_graph',
type=str,
default='',
help='Output \'GraphDef\' file name.')
parser.add_argument(
'--output_binary',
nargs='?',
const=True,
type='bool',
default=True,
help='Whether to write a binary format graph.')
parser.add_argument(
'--input_node_names',
type=str,
default='',
help='The name of the input nodes, comma separated.')
parser.add_argument(
'--output_node_names',
type=str,
default='',
help='The name of the output nodes, comma separated.')
parser.add_argument(
'--placeholder_type_enum',
type=int,
default=dtypes.float32.as_datatype_enum,
help='The AttrValue enum to use for placeholders.')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
mfalkiewicz/pySTATIS | refs/heads/master | pySTATIS/wine_data.py | 1 | import numpy as np
from pySTATIS.statis import STATISData
X1_name = 'Sub-01'
X1 = np.array([[8, 6, 7, 4, 1, 6],
[7, 5, 8, 1, 2, 8],
[6, 5, 6, 5, 3, 4],
[9, 6, 8, 4, 3, 5],
[2, 2, 2, 8, 7, 3],
[3, 4, 4, 9, 6, 1],
[5, 3, 5, 4, 8, 3],
[5, 2, 4, 8, 7, 4],
[8, 6, 8, 4, 4, 7],
[4, 6, 2, 5, 3, 4],
[8, 4, 8, 1, 3, 3],
[5, 3, 6, 4, 4, 2]])
X1_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral', 'Smoky', 'Citrus']
X1_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
X2_name = 'Sub-02'
X2 = np.array([[8, 6, 8, 3, 7, 5],
[6, 5, 6, 3, 7, 7],
[6, 6, 6, 5, 8, 7],
[8, 6, 8, 4, 6, 6],
[2, 3, 1, 7, 4, 3],
[4, 3, 4, 9, 3, 5],
[3, 3, 2, 7, 4, 4],
[4, 3, 5, 5, 3, 3],
[8, 6, 9, 5, 5, 6],
[5, 5, 5, 6, 5, 8],
[8, 4, 8, 3, 7, 7],
[5, 3, 7, 4, 8, 5]])
X2_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral', 'Tropical', 'Leafy']
X2_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
X3_name = 'Sub-03'
X3 = np.array([[8, 6, 8, 3, 7, 2],
[8, 7, 7, 2, 8, 2],
[8, 7, 7, 6, 9, 1],
[8, 2, 8, 3, 9, 3],
[3, 4, 3, 6, 4, 6],
[4, 3, 4, 8, 3, 9],
[5, 4, 5, 2, 3, 6],
[6, 3, 7, 7, 1, 7],
[8, 5, 9, 1, 5, 2],
[5, 5, 4, 6, 5, 1],
[8, 3, 7, 3, 5, 4],
[5, 4, 4, 5, 4, 3]])
X3_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral', 'Grassy', 'Flinty']
X3_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
X4_name = 'Sub-04'
X4 = np.array([[9, 5, 8, 2, 6],
[8, 7, 7, 3, 5],
[8, 8, 9, 2, 7],
[8, 8, 9, 4, 7],
[4, 2, 2, 4, 3],
[3, 2, 2, 6, 2],
[4, 4, 4, 6, 4],
[5, 2, 2, 9, 4],
[7, 5, 6, 3, 2],
[5, 6, 6, 4, 4],
[7, 3, 6, 1, 6],
[5, 2, 2, 6, 6]])
X4_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral', 'Leafy']
X4_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
X5_name = 'Sub-05'
X5 = np.array([[9, 6, 9, 3, 8, 2],
[7, 7, 7, 1, 9, 2],
[7, 7, 7, 1, 7, 2],
[8, 9, 7, 5, 6, 1],
[4, 4, 4, 2, 4, 4],
[4, 5, 5, 6, 1, 5],
[6, 5, 7, 2, 3, 1],
[6, 6, 5, 8, 4, 5],
[8, 6, 8, 2, 5, 4],
[6, 6, 6, 4, 6, 3],
[7, 4, 8, 4, 5, 1],
[5, 5, 5, 5, 6, 1]])
X5_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral', 'Vegetal', 'Hay']
X5_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
X6_name = 'Sub-06'
X6 = np.array([[8, 5, 6, 2, 9],
[6, 6, 6, 2, 4],
[7, 7, 7, 2, 7],
[8, 7, 8, 2, 8],
[3, 2, 2, 7, 2],
[3, 3, 3, 3, 4],
[4, 2, 3, 3, 3],
[5, 3, 5, 9, 3],
[7, 7, 7, 1, 4],
[4, 6, 2, 4, 6],
[7, 4, 8, 2, 3],
[4, 5, 3, 3, 7]])
X6_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral', 'Melon']
X6_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
X7_name = 'Sub-07'
X7 = np.array([[8, 5, 8, 4],
[7, 6, 8, 4],
[6, 7, 6, 3],
[7, 8, 6, 1],
[4, 2, 3, 6],
[4, 4, 4, 4],
[4, 3, 4, 4],
[5, 3, 5, 7],
[8, 4, 9, 4],
[4, 7, 5, 2],
[8, 5, 7, 3],
[4, 3, 5, 2]])
X7_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral']
X7_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
X8_name = 'Sub-08'
X8 = np.array([[7, 6, 7, 4, 9, 2],
[6, 5, 6, 2, 7, 2],
[6, 6, 6, 4, 9, 2],
[8, 7, 8, 2, 8, 2],
[3, 3, 4, 4, 4, 4],
[4, 4, 4, 7, 3, 6],
[5, 3, 5, 3, 3, 5],
[6, 4, 6, 3, 2, 4],
[8, 6, 5, 4, 5, 4],
[5, 7, 5, 4, 6, 1],
[7, 4, 8, 2, 6, 2],
[5, 4, 6, 2, 4, 3]])
X8_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral', 'Cutgrass', 'Smoky']
X8_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
X9_name = 'Sub-09'
X9 = np.array([[8, 6, 9, 1, 7],
[8, 7, 9, 1, 6],
[7, 7, 8, 4, 7],
[8, 9, 9, 3, 9],
[3, 4, 4, 5, 4],
[5, 5, 5, 7, 2],
[5, 5, 5, 6, 3],
[5, 5, 6, 5, 3],
[8, 7, 8, 4, 7],
[5, 6, 4, 5, 6],
[8, 4, 7, 4, 5],
[5, 4, 5, 3, 4]])
X9_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral', 'Peach']
X9_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
X10_name = 'Sub-10'
X10 = np.array([[8, 6, 7, 5],
[7, 5, 7, 3],
[7, 6, 6, 2],
[8, 7, 7, 4],
[2, 3, 1, 7],
[3, 3, 3, 9],
[4, 2, 5, 8],
[3, 4, 2, 8],
[8, 6, 7, 4],
[5, 6, 4, 4],
[7, 4, 8, 5],
[5, 4, 6, 6]])
X10_col_names = ['CatPee', 'PassionFruit', 'GreenPepper', 'Mineral']
X10_row_names = ['NZ_1', 'NZ_2', 'NZ_3', 'NZ_4', 'FR_1', 'FR_2', 'FR_3', 'FR_4', 'CA_1', 'CA_2', 'CA_3', 'CA_4']
def get_wine_data():
X = [STATISData(X=X1, ID=X1_name, normalize=('zscore', 'norm_one'), col_names=X1_col_names, row_names=X1_row_names),
STATISData(X=X2, ID=X2_name, normalize=('zscore', 'norm_one'), col_names=X2_col_names, row_names=X1_row_names),
STATISData(X=X3, ID=X3_name, normalize=('zscore', 'norm_one'), col_names=X3_col_names, row_names=X1_row_names),
STATISData(X=X4, ID=X4_name, normalize=('zscore', 'norm_one'), col_names=X4_col_names, row_names=X1_row_names),
STATISData(X=X5, ID=X5_name, normalize=('zscore', 'norm_one'), col_names=X5_col_names, row_names=X1_row_names),
STATISData(X=X6, ID=X6_name, normalize=('zscore', 'norm_one'), col_names=X6_col_names, row_names=X1_row_names),
STATISData(X=X7, ID=X7_name, normalize=('zscore', 'norm_one'), col_names=X7_col_names, row_names=X1_row_names),
STATISData(X=X8, ID=X8_name, normalize=('zscore', 'norm_one'), col_names=X8_col_names, row_names=X1_row_names),
STATISData(X=X9, ID=X9_name, normalize=('zscore', 'norm_one'), col_names=X9_col_names, row_names=X1_row_names),
STATISData(X=X10, ID=X10_name, normalize=('zscore', 'norm_one'), col_names=X10_col_names,
row_names=X1_row_names)
]
return X
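# Small sanity-check sketch (runs only when this module is executed directly):
# every assessor table above rates the same 12 wines; only the number of
# attribute columns differs per assessor.
if __name__ == '__main__':
    _tables = [X1, X2, X3, X4, X5, X6, X7, X8, X9, X10]
    assert all(t.shape[0] == 12 for t in _tables)
    print([t.shape[1] for t in _tables])  # expected: [6, 6, 6, 5, 6, 5, 4, 6, 5, 4]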
sup_name = "Supplementary"
sup = np.array([[5.6, 3.38, 14, 3],
[5.3, 3.53, 13.5, 3.6],
[6.2, 3.27, 14, 3],
[8.5, 3.19, 13.5, 3.9],
[5, 3.6, 12.5, 1.5],
[5.88, 3, 12.5, 2],
[4.5, 3.33, 13, 0.8],
[5.6, 3.4, 12, 2.1],
[7.6, 3.3, 13, 2.8],
[5.7, 3.43, 13.5, 2.1],
[6.2, 3.3, 12.5, 2.5],
[6.9, 2.2, 13, 2]])
sup_col_names = ['titratable acidity', 'pH', 'alcohol', 'residual sugar']
def get_sup_wine_data():
Xsup = [STATISData(X=sup, ID=sup_name, normalize=('zscore', 'norm_one'), col_names=sup_col_names,
row_names=X1_row_names)]
return Xsup |
bguillot/OpenUpgrade | refs/heads/master | openerp/addons/test_convert/__openerp__.py | 437 | {
'name': 'test_convert',
'description': "Data for xml conversion tests",
'version': '0.0.1',
}
|
1upon0/rfid-auth-system | refs/heads/master | GUI/printer/Pillow-2.7.0/build/lib.linux-x86_64-2.7/PIL/JpegImagePlugin.py | 13 | #
# The Python Imaging Library.
# $Id$
#
# JPEG (JFIF) file handling
#
# See "Digital Compression and Coding of Continous-Tone Still Images,
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
#
# History:
# 1995-09-09 fl Created
# 1995-09-13 fl Added full parser
# 1996-03-25 fl Added hack to use the IJG command line utilities
# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
# 1996-05-28 fl Added draft support, JFIF version (0.1)
# 1996-12-30 fl Added encoder options, added progression property (0.2)
# 1997-08-27 fl Save mode 1 images as BW (0.3)
# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)
# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)
# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)
# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)
# 2003-04-25 fl Added experimental EXIF decoder (0.5)
# 2003-06-06 fl Added experimental EXIF GPSinfo decoder
# 2003-09-13 fl Extract COM markers
# 2009-09-06 fl Added icc_profile support (from Florian Hoech)
# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)
# 2009-03-08 fl Added subsampling support (from Justin Huff).
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.6"
import array
import struct
import io
from struct import unpack
from PIL import Image, ImageFile, TiffImagePlugin, _binary
from PIL.JpegPresets import presets
from PIL._util import isStringType
i8 = _binary.i8
o8 = _binary.o8
i16 = _binary.i16be
i32 = _binary.i32be
#
# Parser
def Skip(self, marker):
n = i16(self.fp.read(2))-2
ImageFile._safe_read(self.fp, n)
def APP(self, marker):
#
# Application marker. Store these in the APP dictionary.
# Also look for well-known application markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
app = "APP%d" % (marker & 15)
self.app[app] = s # compatibility
self.applist.append((app, s))
if marker == 0xFFE0 and s[:4] == b"JFIF":
# extract JFIF information
self.info["jfif"] = version = i16(s, 5) # version
self.info["jfif_version"] = divmod(version, 256)
# extract JFIF properties
try:
jfif_unit = i8(s[7])
jfif_density = i16(s, 8), i16(s, 10)
except:
pass
else:
if jfif_unit == 1:
self.info["dpi"] = jfif_density
self.info["jfif_unit"] = jfif_unit
self.info["jfif_density"] = jfif_density
elif marker == 0xFFE1 and s[:5] == b"Exif\0":
# extract Exif information (incomplete)
self.info["exif"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
# extract FlashPix information (incomplete)
self.info["flashpix"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
# Since an ICC profile can be larger than the maximum size of
# a JPEG marker (64K), we need provisions to split it into
# multiple markers. The format defined by the ICC specifies
# one or more APP2 markers containing the following data:
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
# Marker sequence number 1, 2, etc (1 byte)
# Number of markers Total of APP2's used (1 byte)
# Profile data (remainder of APP2 data)
# Decoders should use the marker sequence numbers to
# reassemble the profile, rather than assuming that the APP2
# markers appear in the correct sequence.
self.icclist.append(s)
elif marker == 0xFFEE and s[:5] == b"Adobe":
self.info["adobe"] = i16(s, 5)
# extract Adobe custom properties
try:
adobe_transform = i8(s[1])
except:
pass
else:
self.info["adobe_transform"] = adobe_transform
elif marker == 0xFFE2 and s[:4] == b"MPF\0":
# extract MPO information
self.info["mp"] = s[4:]
# offset is current location minus buffer size
# plus constant header size
self.info["mpoffset"] = self.fp.tell() - n + 4
def COM(self, marker):
#
# Comment marker. Store these in the APP dictionary.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.app["COM"] = s # compatibility
self.applist.append(("COM", s))
def SOF(self, marker):
#
# Start of frame marker. Defines the size and mode of the
# image. JPEG is colour blind, so we use some simple
# heuristics to map the number of layers to an appropriate
# mode. Note that this could be made a bit brighter, by
# looking for JFIF and Adobe APP markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.size = i16(s[3:]), i16(s[1:])
self.bits = i8(s[0])
if self.bits != 8:
raise SyntaxError("cannot handle %d-bit layers" % self.bits)
self.layers = i8(s[5])
if self.layers == 1:
self.mode = "L"
elif self.layers == 3:
self.mode = "RGB"
elif self.layers == 4:
self.mode = "CMYK"
else:
raise SyntaxError("cannot handle %d-layer images" % self.layers)
if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
self.info["progressive"] = self.info["progression"] = 1
if self.icclist:
# fixup icc profile
self.icclist.sort() # sort by sequence number
if i8(self.icclist[0][13]) == len(self.icclist):
profile = []
for p in self.icclist:
profile.append(p[14:])
icc_profile = b"".join(profile)
else:
icc_profile = None # wrong number of fragments
self.info["icc_profile"] = icc_profile
self.icclist = None
for i in range(6, len(s), 3):
t = s[i:i+3]
# 4-tuples: id, vsamp, hsamp, qtable
self.layer.append((t[0], i8(t[1])//16, i8(t[1]) & 15, i8(t[2])))
def DQT(self, marker):
#
# Define quantization table. Support baseline 8-bit tables
# only. Note that there might be more than one table in
# each marker.
# FIXME: The quantization tables can be used to estimate the
# compression quality.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
while len(s):
if len(s) < 65:
raise SyntaxError("bad quantization table marker")
v = i8(s[0])
if v//16 == 0:
self.quantization[v & 15] = array.array("b", s[1:65])
s = s[65:]
else:
return # FIXME: add code to read 16-bit tables!
# raise SyntaxError, "bad quantization table element size"
#
# JPEG marker table
MARKER = {
0xFFC0: ("SOF0", "Baseline DCT", SOF),
0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
0xFFC2: ("SOF2", "Progressive DCT", SOF),
0xFFC3: ("SOF3", "Spatial lossless", SOF),
0xFFC4: ("DHT", "Define Huffman table", Skip),
0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
0xFFC7: ("SOF7", "Differential spatial", SOF),
0xFFC8: ("JPG", "Extension", None),
0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
0xFFD0: ("RST0", "Restart 0", None),
0xFFD1: ("RST1", "Restart 1", None),
0xFFD2: ("RST2", "Restart 2", None),
0xFFD3: ("RST3", "Restart 3", None),
0xFFD4: ("RST4", "Restart 4", None),
0xFFD5: ("RST5", "Restart 5", None),
0xFFD6: ("RST6", "Restart 6", None),
0xFFD7: ("RST7", "Restart 7", None),
0xFFD8: ("SOI", "Start of image", None),
0xFFD9: ("EOI", "End of image", None),
0xFFDA: ("SOS", "Start of scan", Skip),
0xFFDB: ("DQT", "Define quantization table", DQT),
0xFFDC: ("DNL", "Define number of lines", Skip),
0xFFDD: ("DRI", "Define restart interval", Skip),
0xFFDE: ("DHP", "Define hierarchical progression", SOF),
0xFFDF: ("EXP", "Expand reference component", Skip),
0xFFE0: ("APP0", "Application segment 0", APP),
0xFFE1: ("APP1", "Application segment 1", APP),
0xFFE2: ("APP2", "Application segment 2", APP),
0xFFE3: ("APP3", "Application segment 3", APP),
0xFFE4: ("APP4", "Application segment 4", APP),
0xFFE5: ("APP5", "Application segment 5", APP),
0xFFE6: ("APP6", "Application segment 6", APP),
0xFFE7: ("APP7", "Application segment 7", APP),
0xFFE8: ("APP8", "Application segment 8", APP),
0xFFE9: ("APP9", "Application segment 9", APP),
0xFFEA: ("APP10", "Application segment 10", APP),
0xFFEB: ("APP11", "Application segment 11", APP),
0xFFEC: ("APP12", "Application segment 12", APP),
0xFFED: ("APP13", "Application segment 13", APP),
0xFFEE: ("APP14", "Application segment 14", APP),
0xFFEF: ("APP15", "Application segment 15", APP),
0xFFF0: ("JPG0", "Extension 0", None),
0xFFF1: ("JPG1", "Extension 1", None),
0xFFF2: ("JPG2", "Extension 2", None),
0xFFF3: ("JPG3", "Extension 3", None),
0xFFF4: ("JPG4", "Extension 4", None),
0xFFF5: ("JPG5", "Extension 5", None),
0xFFF6: ("JPG6", "Extension 6", None),
0xFFF7: ("JPG7", "Extension 7", None),
0xFFF8: ("JPG8", "Extension 8", None),
0xFFF9: ("JPG9", "Extension 9", None),
0xFFFA: ("JPG10", "Extension 10", None),
0xFFFB: ("JPG11", "Extension 11", None),
0xFFFC: ("JPG12", "Extension 12", None),
0xFFFD: ("JPG13", "Extension 13", None),
0xFFFE: ("COM", "Comment", COM)
}
def _accept(prefix):
return prefix[0:1] == b"\377"
##
# Image plugin for JPEG and JFIF images.
class JpegImageFile(ImageFile.ImageFile):
format = "JPEG"
format_description = "JPEG (ISO 10918)"
def _open(self):
s = self.fp.read(1)
if i8(s[0]) != 255:
raise SyntaxError("not a JPEG file")
# Create attributes
self.bits = self.layers = 0
# JPEG specifics (internal)
self.layer = []
self.huffman_dc = {}
self.huffman_ac = {}
self.quantization = {}
self.app = {} # compatibility
self.applist = []
self.icclist = []
while True:
i = i8(s)
if i == 0xFF:
s = s + self.fp.read(1)
i = i16(s)
else:
# Skip non-0xFF junk
s = b"\xff"
continue
if i in MARKER:
name, description, handler = MARKER[i]
# print hex(i), name, description
if handler is not None:
handler(self, i)
if i == 0xFFDA: # start of scan
rawmode = self.mode
if self.mode == "CMYK":
rawmode = "CMYK;I" # assume adobe conventions
self.tile = [("jpeg", (0, 0) + self.size, 0,
(rawmode, ""))]
# self.__offset = self.fp.tell()
break
s = self.fp.read(1)
elif i == 0 or i == 0xFFFF:
# padded marker or junk; move on
s = b"\xff"
else:
raise SyntaxError("no marker found")
def draft(self, mode, size):
if len(self.tile) != 1:
return
d, e, o, a = self.tile[0]
scale = 0
if a[0] == "RGB" and mode in ["L", "YCbCr"]:
self.mode = mode
a = mode, ""
if size:
scale = max(self.size[0] // size[0], self.size[1] // size[1])
for s in [8, 4, 2, 1]:
if scale >= s:
break
e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1]
self.size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s)
scale = s
self.tile = [(d, e, o, a)]
self.decoderconfig = (scale, 0)
return self
def load_djpeg(self):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
import subprocess
import tempfile
import os
f, path = tempfile.mkstemp()
os.close(f)
if os.path.exists(self.filename):
subprocess.check_call(["djpeg", "-outfile", path, self.filename])
else:
raise ValueError("Invalid Filename")
try:
self.im = Image.core.open_ppm(path)
finally:
try:
os.unlink(path)
except:
pass
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
def _getexif(self):
return _getexif(self)
def _getmp(self):
return _getmp(self)
def _fixup(value):
# Helper function for _getexif() and _getmp()
if len(value) == 1:
return value[0]
return value
def _getexif(self):
# Extract EXIF information. This method is highly experimental,
# and is likely to be replaced with something better in a future
# version.
# The EXIF record consists of a TIFF file embedded in a JPEG
# application marker (!).
try:
data = self.info["exif"]
except KeyError:
return None
file = io.BytesIO(data[6:])
head = file.read(8)
exif = {}
# process dictionary
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = _fixup(value)
# get exif extension
try:
file.seek(exif[0x8769])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = _fixup(value)
# get gpsinfo extension
try:
file.seek(exif[0x8825])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
exif[0x8825] = gps = {}
for key, value in info.items():
gps[key] = _fixup(value)
return exif
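# Usage sketch for the EXIF accessor above (runs only when this module is
# executed directly with a file argument). Tag 0x010F is the TIFF "Make" tag
# and 0x0110 is "Model"; the file path comes from the command line.
if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        _img = Image.open(sys.argv[1])
        _exif = _img._getexif() or {}
        print(_exif.get(0x010F), _exif.get(0x0110))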
def _getmp(self):
# Extract MP information. This method was inspired by the "highly
# experimental" _getexif version that's been in use for years now,
# itself based on the ImageFileDirectory class in the TIFF plug-in.
# The MP record essentially consists of a TIFF file embedded in a JPEG
# application marker.
try:
data = self.info["mp"]
except KeyError:
return None
file = io.BytesIO(data)
head = file.read(8)
endianness = '>' if head[:4] == b'\x4d\x4d\x00\x2a' else '<'
mp = {}
# process dictionary
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
mp[key] = _fixup(value)
# it's an error not to have a number of images
try:
quant = mp[0xB001]
except KeyError:
raise SyntaxError("malformed MP Index (no number of images)")
# get MP entries
try:
mpentries = []
for entrynum in range(0, quant):
rawmpentry = mp[0xB002][entrynum * 16:(entrynum + 1) * 16]
unpackedentry = unpack('{0}LLLHH'.format(endianness), rawmpentry)
labels = ('Attribute', 'Size', 'DataOffset', 'EntryNo1',
'EntryNo2')
mpentry = dict(zip(labels, unpackedentry))
mpentryattr = {
'DependentParentImageFlag': bool(mpentry['Attribute'] &
(1 << 31)),
'DependentChildImageFlag': bool(mpentry['Attribute'] &
(1 << 30)),
'RepresentativeImageFlag': bool(mpentry['Attribute'] &
(1 << 29)),
'Reserved': (mpentry['Attribute'] & (3 << 27)) >> 27,
'ImageDataFormat': (mpentry['Attribute'] & (7 << 24)) >> 24,
'MPType': mpentry['Attribute'] & 0x00FFFFFF
}
if mpentryattr['ImageDataFormat'] == 0:
mpentryattr['ImageDataFormat'] = 'JPEG'
else:
raise SyntaxError("unsupported picture format in MPO")
mptypemap = {
0x000000: 'Undefined',
0x010001: 'Large Thumbnail (VGA Equivalent)',
0x010002: 'Large Thumbnail (Full HD Equivalent)',
0x020001: 'Multi-Frame Image (Panorama)',
0x020002: 'Multi-Frame Image: (Disparity)',
0x020003: 'Multi-Frame Image: (Multi-Angle)',
0x030000: 'Baseline MP Primary Image'
}
mpentryattr['MPType'] = mptypemap.get(mpentryattr['MPType'],
'Unknown')
mpentry['Attribute'] = mpentryattr
mpentries.append(mpentry)
mp[0xB002] = mpentries
except KeyError:
raise SyntaxError("malformed MP Index (bad MP Entry)")
# Next we should try and parse the individual image unique ID list;
# we don't because I've never seen this actually used in a real MPO
# file and so can't test it.
return mp
# --------------------------------------------------------------------
# stuff to save JPEG files
RAWMODE = {
"1": "L",
"L": "L",
"RGB": "RGB",
"RGBA": "RGB",
"RGBX": "RGB",
"CMYK": "CMYK;I", # assume adobe conventions
"YCbCr": "YCbCr",
}
zigzag_index = ( 0, 1, 5, 6, 14, 15, 27, 28,
2, 4, 7, 13, 16, 26, 29, 42,
3, 8, 12, 17, 25, 30, 41, 43,
9, 11, 18, 24, 31, 40, 44, 53,
10, 19, 23, 32, 39, 45, 52, 54,
20, 22, 33, 38, 46, 51, 55, 60,
21, 34, 37, 47, 50, 56, 59, 61,
35, 36, 48, 49, 57, 58, 62, 63)
samplings = {(1, 1, 1, 1, 1, 1): 0,
(2, 1, 1, 1, 1, 1): 1,
(2, 2, 1, 1, 1, 1): 2,
}
def convert_dict_qtables(qtables):
qtables = [qtables[key] for key in range(len(qtables)) if key in qtables]
for idx, table in enumerate(qtables):
qtables[idx] = [table[i] for i in zigzag_index]
return qtables
def get_sampling(im):
    # There's no subsampling when images have only 1 layer
# (grayscale images) or when they are CMYK (4 layers),
# so set subsampling to default value.
#
# NOTE: currently Pillow can't encode JPEG to YCCK format.
# If YCCK support is added in the future, subsampling code will have
# to be updated (here and in JpegEncode.c) to deal with 4 layers.
if not hasattr(im, 'layers') or im.layers in (1, 4):
return -1
sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
return samplings.get(sampling, -1)
def _save(im, fp, filename):
try:
rawmode = RAWMODE[im.mode]
except KeyError:
raise IOError("cannot write mode %s as JPEG" % im.mode)
info = im.encoderinfo
dpi = info.get("dpi", (0, 0))
quality = info.get("quality", 0)
subsampling = info.get("subsampling", -1)
qtables = info.get("qtables")
if quality == "keep":
quality = 0
subsampling = "keep"
qtables = "keep"
elif quality in presets:
preset = presets[quality]
quality = 0
subsampling = preset.get('subsampling', -1)
qtables = preset.get('quantization')
elif not isinstance(quality, int):
raise ValueError("Invalid quality setting")
else:
if subsampling in presets:
subsampling = presets[subsampling].get('subsampling', -1)
if isStringType(qtables) and qtables in presets:
qtables = presets[qtables].get('quantization')
if subsampling == "4:4:4":
subsampling = 0
elif subsampling == "4:2:2":
subsampling = 1
elif subsampling == "4:1:1":
subsampling = 2
elif subsampling == "keep":
if im.format != "JPEG":
raise ValueError(
"Cannot use 'keep' when original image is not a JPEG")
subsampling = get_sampling(im)
def validate_qtables(qtables):
if qtables is None:
return qtables
if isStringType(qtables):
try:
lines = [int(num) for line in qtables.splitlines()
for num in line.split('#', 1)[0].split()]
except ValueError:
raise ValueError("Invalid quantization table")
else:
qtables = [lines[s:s+64] for s in range(0, len(lines), 64)]
if isinstance(qtables, (tuple, list, dict)):
if isinstance(qtables, dict):
qtables = convert_dict_qtables(qtables)
elif isinstance(qtables, tuple):
qtables = list(qtables)
if not (0 < len(qtables) < 5):
raise ValueError("None or too many quantization tables")
for idx, table in enumerate(qtables):
try:
if len(table) != 64:
raise
table = array.array('b', table)
except TypeError:
raise ValueError("Invalid quantization table")
else:
qtables[idx] = list(table)
return qtables
if qtables == "keep":
if im.format != "JPEG":
raise ValueError(
"Cannot use 'keep' when original image is not a JPEG")
qtables = getattr(im, "quantization", None)
qtables = validate_qtables(qtables)
extra = b""
icc_profile = info.get("icc_profile")
if icc_profile:
ICC_OVERHEAD_LEN = 14
MAX_BYTES_IN_MARKER = 65533
MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
markers = []
while icc_profile:
markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
i = 1
for marker in markers:
size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker))
extra += (b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) +
o8(len(markers)) + marker)
i += 1
# get keyword arguments
im.encoderconfig = (
quality,
# "progressive" is the official name, but older documentation
# says "progression"
# FIXME: issue a warning if the wrong form is used (post-1.1.7)
"progressive" in info or "progression" in info,
info.get("smooth", 0),
"optimize" in info,
info.get("streamtype", 0),
dpi[0], dpi[1],
subsampling,
qtables,
extra,
info.get("exif", b"")
)
# if we optimize, libjpeg needs a buffer big enough to hold the whole image
    # in one shot. Guessing on the size, at im.size bytes (raw pixel size is
    # channels*size; this is a value that's been used in a django patch).
# https://github.com/jdriscoll/django-imagekit/issues/50
bufsize = 0
if "optimize" in info or "progressive" in info or "progression" in info:
if quality >= 95:
bufsize = 2 * im.size[0] * im.size[1]
else:
bufsize = im.size[0] * im.size[1]
# The exif info needs to be written as one block, + APP1, + one spare byte.
# Ensure that our buffer is big enough
bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif", b"")) + 5)
ImageFile._save(im, fp, [("jpeg", (0, 0)+im.size, 0, rawmode)], bufsize)
def _save_cjpeg(im, fp, filename):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities.
import os
import subprocess
tempfile = im._dump()
subprocess.check_call(["cjpeg", "-outfile", filename, tempfile])
try:
        os.unlink(tempfile)
except:
pass
##
# Factory for making JPEG and MPO instances
def jpeg_factory(fp=None, filename=None):
im = JpegImageFile(fp, filename)
mpheader = im._getmp()
try:
if mpheader[45057] > 1:
# It's actually an MPO
from .MpoImagePlugin import MpoImageFile
im = MpoImageFile(fp, filename)
except (TypeError, IndexError):
# It is really a JPEG
pass
return im
# --------------------------------------------------------------------
# Registry stuff
Image.register_open("JPEG", jpeg_factory, _accept)
Image.register_save("JPEG", _save)
Image.register_extension("JPEG", ".jfif")
Image.register_extension("JPEG", ".jpe")
Image.register_extension("JPEG", ".jpg")
Image.register_extension("JPEG", ".jpeg")
Image.register_mime("JPEG", "image/jpeg")
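# Usage sketch (runs only when this module is executed directly): exercises the
# encoder options that _save() above reads from ``im.encoderinfo`` -- "quality",
# "optimize" and "subsampling" ("4:2:2" maps to subsampling mode 1). The output
# file name is arbitrary.
if __name__ == "__main__":
    _im = Image.new("RGB", (64, 64), (255, 0, 0))
    _im.save("demo_out.jpg", "JPEG", quality=85, optimize=True,
             subsampling="4:2:2")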
|
bgris/ODL_bgris | refs/heads/master | lib/python3.5/site-packages/scipy/optimize/minpack.py | 11 | from __future__ import division, print_function, absolute_import
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, product, greater, array,
all, where, isscalar, asarray, inf, abs,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd
from scipy._lib._util import _asarray_validated, _lazywhere
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable(x), optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as a scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the 'hybr' `method` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
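    Examples
    --------
    A minimal sketch (one scalar equation, shown as an illustration rather
    than a strict doctest):
    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> root = fsolve(lambda x: np.cos(x) - x, 0.5)
    >>> bool(np.allclose(np.cos(root) - root, 0.0))
    True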
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
status = res['status']
msg = res['message']
if status == 0:
raise TypeError(msg)
elif status == 1:
pass
elif status in [2, 3, 4, 5]:
warnings.warn(msg, RuntimeWarning)
else:
raise TypeError(msg)
return res['x']
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
info['message'] = errors['unknown']
return sol
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
should take at least one (possibly length N vector) argument and
returns M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
Normally the actual step length will be sqrt(epsfcn)*x
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as a scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
Uses the fjac and ipvt optional outputs to construct an
estimate of the jacobian around the solution. None if a
singular matrix encountered (indicates very flat curvature in
some direction). This matrix must be multiplied by the
residual variance to get the covariance of the
parameter estimates -- see curve_fit.
infodict : dict
        a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
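    Examples
    --------
    A minimal sketch (fitting a straight line ``y = a*x + b`` to exact data,
    so the recovered parameters should come out close to ``a=2, b=1``):
    >>> import numpy as np
    >>> from scipy.optimize import leastsq
    >>> def residuals(p, x, y):
    ...     return y - (p[0] * x + p[1])
    >>> x = np.arange(10, dtype=float)
    >>> y = 2.0 * x + 1.0
    >>> popt, ier = leastsq(residuals, [1.0, 0.0], args=(x, y))
    >>> bool(np.allclose(popt, [2.0, 1.0]))
    True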
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output, col_deriv,
ftol, xtol, gtol, maxfev, factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible.""" % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError],
'unknown': ["Unknown error.", TypeError]}
info = retval[-1] # The FORTRAN return value
if info not in [1, 2, 3, 4] and not full_output:
if info in [5, 6, 7, 8]:
warnings.warn(errors[info][0], RuntimeWarning)
else:
try:
raise errors[info][1](errors[info][0])
except KeyError:
raise errors['unknown'][1](errors['unknown'][0])
mesg = errors[info][0]
if full_output:
cov_x = None
if info in [1, 2, 3, 4]:
from numpy.dual import inv
from numpy.linalg import LinAlgError
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (mesg, info)
else:
return (retval[0], info)
def _wrap_func(func, xdata, ydata, weights):
if weights is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
else:
def func_wrapped(params):
return weights * (func(xdata, *params) - ydata)
return func_wrapped
def _wrap_jac(jac, xdata, weights):
if weights is None:
def jac_wrapped(params):
return jac(xdata, *params)
else:
def jac_wrapped(params):
return weights[:, np.newaxis] * np.asarray(jac(xdata, *params))
return jac_wrapped
def _initialize_feasible(lb, ub):
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
check_finite=True, bounds=(-np.inf, np.inf), method=None,
jac=None, **kwargs):
"""
Use non-linear least squares to fit a function, f, to data.
Assumes ``ydata = f(xdata, *params) + eps``
Parameters
----------
f : callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
    xdata : An M-length sequence or a (k,M)-shaped array
for functions with k predictors.
The independent variable where the data is measured.
ydata : M-length sequence
The dependent data --- nominally f(xdata, ...)
p0 : None, scalar, or N-length sequence, optional
Initial guess for the parameters. If None, then the initial
values will all be 1 (if the number of parameters for the function
can be determined using introspection, otherwise a ValueError
is raised).
sigma : None or M-length sequence, optional
If not None, the uncertainties in the ydata array. These are used as
weights in the least-squares problem
i.e. minimising ``np.sum( ((f(xdata, *popt) - ydata) / sigma)**2 )``
If None, the uncertainties are assumed to be 1.
absolute_sigma : bool, optional
If False, `sigma` denotes relative weights of the data points.
The returned covariance matrix `pcov` is based on *estimated*
errors in the data, and is not affected by the overall
magnitude of the values in `sigma`. Only the relative
magnitudes of the `sigma` values matter.
If True, `sigma` describes one standard deviation errors of
the input data points. The estimated covariance in `pcov` is
based on these values.
check_finite : bool, optional
        If True, check that the input arrays do not contain nans or infs,
and raise a ValueError if they do. Setting this parameter to
False may silently produce nonsensical results if the input arrays
do contain nans. Default is True.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each element of the tuple must be either an array with the length equal
to the number of parameters, or a scalar (in which case the bound is
taken to be the same for all parameters.) Use ``np.inf`` with an
appropriate sign to disable bounds on all or some parameters.
.. versionadded:: 0.17
method : {'lm', 'trf', 'dogbox'}, optional
Method to use for optimization. See `least_squares` for more details.
Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
provided. The method 'lm' won't work when the number of observations
is less than the number of variables, use 'trf' or 'dogbox' in this
case.
.. versionadded:: 0.17
jac : callable, string or None, optional
Function with signature ``jac(x, ...)`` which computes the Jacobian
matrix of the model function with respect to parameters as a dense
array_like structure. It will be scaled according to provided `sigma`.
If None (default), the Jacobian will be estimated numerically.
String keywords for 'trf' and 'dogbox' methods can be used to select
a finite difference scheme, see `least_squares`.
.. versionadded:: 0.18
kwargs
Keyword arguments passed to `leastsq` for ``method='lm'`` or
`least_squares` otherwise.
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared error
of ``f(xdata, *popt) - ydata`` is minimized
pcov : 2d array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate. To compute one standard deviation errors
on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
How the `sigma` parameter affects the estimated covariance
depends on `absolute_sigma` argument, as described above.
If the Jacobian matrix at the solution doesn't have a full rank, then
'lm' method returns a matrix filled with ``np.inf``, on the other hand
'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
the covariance matrix.
Raises
------
ValueError
if either `ydata` or `xdata` contain NaNs, or if incompatible options
are used.
RuntimeError
if the least-squares minimization fails.
OptimizeWarning
if covariance of the parameters can not be estimated.
See Also
--------
least_squares : Minimize the sum of squares of nonlinear functions.
stats.linregress : Calculate a linear least squares regression for two sets
of measurements.
Notes
-----
With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
through `leastsq`. Note that this algorithm can only deal with
unconstrained problems.
Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
the docstring of `least_squares` for more information.
Examples
--------
>>> import numpy as np
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c):
... return a * np.exp(-b * x) + c
>>> xdata = np.linspace(0, 4, 50)
>>> y = func(xdata, 2.5, 1.3, 0.5)
>>> ydata = y + 0.2 * np.random.normal(size=len(xdata))
>>> popt, pcov = curve_fit(func, xdata, ydata)
Constrain the optimization to the region of ``0 < a < 3``, ``0 < b < 2``
and ``0 < c < 1``:
>>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 2., 1.]))
"""
if p0 is None:
# determine number of parameters by inspecting the function
from scipy._lib._util import getargspec_no_self as _getargspec
args, varargs, varkw, defaults = _getargspec(f)
if len(args) < 2:
raise ValueError("Unable to determine number of fit parameters.")
n = len(args) - 1
else:
p0 = np.atleast_1d(p0)
n = p0.size
lb, ub = prepare_bounds(bounds, n)
if p0 is None:
p0 = _initialize_feasible(lb, ub)
bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
if method is None:
if bounded_problem:
method = 'trf'
else:
method = 'lm'
if method == 'lm' and bounded_problem:
raise ValueError("Method 'lm' only works for unconstrained problems. "
"Use 'trf' or 'dogbox' instead.")
# NaNs can not be handled
if check_finite:
ydata = np.asarray_chkfinite(ydata)
else:
ydata = np.asarray(ydata)
if isinstance(xdata, (list, tuple, np.ndarray)):
# `xdata` is passed straight to the user-defined `f`, so allow
# non-array_like `xdata`.
if check_finite:
xdata = np.asarray_chkfinite(xdata)
else:
xdata = np.asarray(xdata)
weights = 1.0 / asarray(sigma) if sigma is not None else None
func = _wrap_func(f, xdata, ydata, weights)
if callable(jac):
jac = _wrap_jac(jac, xdata, weights)
elif jac is None and method != 'lm':
jac = '2-point'
if method == 'lm':
# Remove full_output from kwargs, otherwise we're passing it in twice.
return_full = kwargs.pop('full_output', False)
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
popt, pcov, infodict, errmsg, ier = res
cost = np.sum(infodict['fvec'] ** 2)
if ier not in [1, 2, 3, 4]:
raise RuntimeError("Optimal parameters not found: " + errmsg)
else:
res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
**kwargs)
if not res.success:
raise RuntimeError("Optimal parameters not found: " + res.message)
cost = 2 * res.cost # res.cost is half sum of squares!
popt = res.x
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
return_full = False
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = zeros((len(popt), len(popt)), dtype=float)
pcov.fill(inf)
warn_cov = True
elif not absolute_sigma:
if ydata.size > p0.size:
s_sq = cost / (ydata.size - p0.size)
pcov = pcov * s_sq
else:
pcov.fill(inf)
warn_cov = True
if warn_cov:
warnings.warn('Covariance of the parameters could not be estimated',
category=OptimizeWarning)
if return_full:
return popt, pcov, infodict, errmsg, ier
else:
return popt, pcov
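# Standalone sketch of the covariance step in the 'trf'/'dogbox' branch of
# curve_fit() above: a Moore-Penrose pseudoinverse of J^T J is built from the
# SVD of J, discarding singular values below a cutoff. The 3x2 matrix is
# illustrative only; the block is kept behind a __main__ guard so importing
# this module is unaffected.
if __name__ == "__main__":
    _J = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    _, _s, _VT = svd(_J, full_matrices=False)
    _threshold = np.finfo(float).eps * max(_J.shape) * _s[0]
    _s = _s[_s > _threshold]
    _VT = _VT[:_s.size]
    _pcov = np.dot(_VT.T / _s ** 2, _VT)
    # For a full-rank J this equals inv(J^T J), i.e. its pseudoinverse.
    print(np.allclose(_pcov, np.linalg.pinv(_J.T.dot(_J))))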
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
"""Perform a simple check on the gradient for correctness.
"""
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if col_deriv == 0:
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = (product(greater(err, 0.5), axis=0))
return (good, err)
def _del2(p0, p1, d):
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
p0 = x0
for i in range(maxiter):
p1 = func(p0, *args)
if use_accel:
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
else:
p = p1
relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
if np.all(np.abs(relerr) < xtol):
return p
p0 = p
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
"""
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed-point of the function: i.e. where ``func(x0) == x0``.
Parameters
----------
func : function
Function to evaluate.
x0 : array_like
Fixed point of function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
method : {"del2", "iteration"}, optional
Method of finding the fixed-point, defaults to "del2"
which uses Steffensen's Method with Aitken's ``Del^2``
convergence acceleration [1]_. The "iteration" method simply iterates
the function until convergence is detected, without attempting to
accelerate the convergence.
References
----------
.. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
Examples
--------
>>> from scipy import optimize
>>> def func(x, c1, c2):
... return np.sqrt(c1/(x+c2))
>>> c1 = np.array([10,12.])
>>> c2 = np.array([3, 5.])
>>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
array([ 1.4920333 , 1.37228132])
"""
use_accel = {'del2': True, 'iteration': False}[method]
x0 = _asarray_validated(x0, as_inexact=True)
return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
|
ypu/tp-qemu | refs/heads/master | qemu/tests/ksm_overcommit.py | 3 | import logging
import time
import random
import math
import os
from autotest.client.shared import error
from virttest import utils_misc, utils_test, aexpect, env_process, data_dir
from autotest.client.shared import utils
try:
from virttest.staging import utils_memory
except ImportError:
from autotest.client.shared import utils_memory
def run(test, params, env):
"""
Tests KSM (Kernel Shared Memory) capability by allocating and filling
KVM guests memory using various values. KVM sets the memory as
MADV_MERGEABLE so all VM's memory can be merged. The workers in
guest writes to tmpfs filesystem thus allocations are not limited
by process max memory, only by VM's memory. Two test modes are supported -
serial and parallel.
Serial mode - uses multiple VMs, allocates memory per guest and always
verifies the correct number of shared memory.
0) Prints out the setup and initialize guest(s)
1) Fills guest with the same number (S1)
2) Random fill on the first guest
3) Random fill of the remaining VMs one by one until the
memory is completely filled (KVM stops machines which
asks for additional memory until there is available
memory) (S2, shouldn't finish)
4) Destroy all VMs but the last one
5) Checks the last VMs memory for corruption
Parallel mode - uses one VM with multiple allocator workers. Executes
scenarios in parallel to put more stress on the KVM.
0) Prints out the setup and initialize guest(s)
1) Fills memory with the same number (S1)
2) Fills memory with random numbers (S2)
3) Verifies all pages
4) Fills memory with the same number (S2)
5) Changes the last 96B (S3)
Scenarios:
S1) Fill all vms with the same value (all pages should be merged into 1)
S2) Random fill (all pages should be splitted)
S3) Fill last 96B (change only last 96B of each page; some pages will be
merged; there was a bug with data corruption)
Every worker has unique random key so we are able to verify the filled
values.
:param test: kvm test object.
:param params: Dictionary with test parameters.
:param env: Dictionary with the test environment.
:param cfg: ksm_swap - use swap?
:param cfg: ksm_overcommit_ratio - memory overcommit (serial mode only)
:param cfg: ksm_parallel_ratio - number of workers (parallel mode only)
:param cfg: ksm_host_reserve - override memory reserve on host in MB
:param cfg: ksm_guest_reserve - override memory reserve on guests in MB
:param cfg: ksm_mode - test mode {serial, parallel}
:param cfg: ksm_perf_ratio - performance ratio, increase it when your
machine is too slow
"""
def _start_allocator(vm, session, timeout):
"""
Execute ksm_overcommit_guest.py on guest, wait until it's initialized.
:param vm: VM object.
:param session: Remote session to a VM object.
:param timeout: Timeout that will be used to verify if
ksm_overcommit_guest.py started properly.
"""
logging.debug("Starting ksm_overcommit_guest.py on guest %s", vm.name)
session.sendline("python /tmp/ksm_overcommit_guest.py")
try:
session.read_until_last_line_matches(["PASS:", "FAIL:"], timeout)
except aexpect.ExpectProcessTerminatedError, details:
e_msg = ("Command ksm_overcommit_guest.py on vm '%s' failed: %s" %
(vm.name, str(details)))
raise error.TestFail(e_msg)
def _execute_allocator(command, vm, session, timeout):
"""
Execute a given command on ksm_overcommit_guest.py main loop,
indicating the vm the command was executed on.
:param command: Command that will be executed.
:param vm: VM object.
:param session: Remote session to VM object.
:param timeout: Timeout used to verify expected output.
:return: Tuple (match index, data)
"""
logging.debug("Executing '%s' on ksm_overcommit_guest.py loop, "
"vm: %s, timeout: %s", command, vm.name, timeout)
session.sendline(command)
try:
(match, data) = session.read_until_last_line_matches(
["PASS:", "FAIL:"],
timeout)
except aexpect.ExpectProcessTerminatedError, details:
e_msg = ("Failed to execute command '%s' on "
"ksm_overcommit_guest.py, vm '%s': %s" %
(command, vm.name, str(details)))
raise error.TestFail(e_msg)
return (match, data)
def get_ksmstat():
"""
Return sharing memory by ksm in MB
:return: memory in MB
"""
fpages = open('/sys/kernel/mm/ksm/pages_sharing')
ksm_pages = int(fpages.read())
fpages.close()
return ((ksm_pages * 4096) / 1e6)
def initialize_guests():
"""
Initialize guests (fill their memories with specified patterns).
"""
logging.info("Phase 1: filling guest memory pages")
for session in lsessions:
vm = lvms[lsessions.index(session)]
logging.debug("Turning off swap on vm %s", vm.name)
session.cmd("swapoff -a", timeout=300)
# Start the allocator
_start_allocator(vm, session, 60 * perf_ratio)
# Execute allocator on guests
for i in range(0, vmsc):
vm = lvms[i]
cmd = "mem = MemFill(%d, %s, %s)" % (ksm_size, skeys[i], dkeys[i])
_execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)
cmd = "mem.value_fill(%d)" % skeys[0]
_execute_allocator(cmd, vm, lsessions[i],
fill_base_timeout * 2 * perf_ratio)
# Let ksm_overcommit_guest.py do its job
# (until shared mem reaches expected value)
shm = 0
j = 0
logging.debug("Target shared meminfo for guest %s: %s", vm.name,
ksm_size)
while ((new_ksm and (shm < (ksm_size * (i + 1)))) or
(not new_ksm and (shm < (ksm_size)))):
if j > 64:
logging.debug(utils_test.get_memory_info(lvms))
raise error.TestError("SHM didn't merge the memory until "
"the DL on guest: %s" % vm.name)
pause = ksm_size / 200 * perf_ratio
logging.debug("Waiting %ds before proceeding...", pause)
time.sleep(pause)
if (new_ksm):
shm = get_ksmstat()
else:
shm = vm.get_shared_meminfo()
logging.debug("Shared meminfo for guest %s after "
"iteration %s: %s", vm.name, j, shm)
j += 1
# Keep some reserve
pause = ksm_size / 200 * perf_ratio
logging.debug("Waiting %ds before proceeding...", pause)
time.sleep(pause)
logging.debug(utils_test.get_memory_info(lvms))
logging.info("Phase 1: PASS")
def separate_first_guest():
"""
Separate memory of the first guest by generating special random series
"""
logging.info("Phase 2: Split the pages on the first guest")
cmd = "mem.static_random_fill()"
data = _execute_allocator(cmd, lvms[0], lsessions[0],
fill_base_timeout * 2 * perf_ratio)[1]
r_msg = data.splitlines()[-1]
logging.debug("Return message of static_random_fill: %s", r_msg)
out = int(r_msg.split()[4])
logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s", ksm_size,
out, (ksm_size * 1000 / out))
logging.debug(utils_test.get_memory_info(lvms))
logging.debug("Phase 2: PASS")
def split_guest():
"""
Sequential split of pages on guests up to memory limit
"""
logging.info("Phase 3a: Sequential split of pages on guests up to "
"memory limit")
last_vm = 0
session = None
vm = None
for i in range(1, vmsc):
# Check VMs
for j in range(0, vmsc):
                if not lvms[j].is_alive():
e_msg = ("VM %d died while executing static_random_fill on"
" VM %d in allocator loop" % (j, i))
raise error.TestFail(e_msg)
vm = lvms[i]
session = lsessions[i]
cmd = "mem.static_random_fill()"
logging.debug("Executing %s on ksm_overcommit_guest.py loop, "
"vm: %s", cmd, vm.name)
session.sendline(cmd)
out = ""
try:
logging.debug("Watching host mem while filling vm %s memory",
vm.name)
while (not out.startswith("PASS") and
not out.startswith("FAIL")):
if not vm.is_alive():
e_msg = ("VM %d died while executing "
"static_random_fill on allocator loop" % i)
raise error.TestFail(e_msg)
free_mem = int(utils_memory.read_from_meminfo("MemFree"))
if (ksm_swap):
free_mem = (free_mem +
int(utils_memory.read_from_meminfo("SwapFree")))
logging.debug("Free memory on host: %d", free_mem)
# We need to keep some memory for python to run.
if (free_mem < 64000) or (ksm_swap and
free_mem < (450000 * perf_ratio)):
vm.pause()
for j in range(0, i):
lvms[j].destroy(gracefully=False)
time.sleep(20)
vm.resume()
logging.debug("Only %s free memory, killing %d guests",
free_mem, (i - 1))
last_vm = i
out = session.read_nonblocking(0.1, 1)
time.sleep(2)
except OSError:
logging.debug("Only %s host free memory, killing %d guests",
free_mem, (i - 1))
logging.debug("Stopping %s", vm.name)
vm.pause()
for j in range(0, i):
logging.debug("Destroying %s", lvms[j].name)
lvms[j].destroy(gracefully=False)
time.sleep(20)
vm.resume()
last_vm = i
if last_vm != 0:
break
logging.debug("Memory filled for guest %s", vm.name)
logging.info("Phase 3a: PASS")
logging.info("Phase 3b: Verify memory of the max stressed VM")
for i in range(last_vm + 1, vmsc):
lsessions[i].close()
if i == (vmsc - 1):
logging.debug(utils_test.get_memory_info([lvms[i]]))
logging.debug("Destroying guest %s", lvms[i].name)
lvms[i].destroy(gracefully=False)
# Verify last machine with randomly generated memory
cmd = "mem.static_random_verify()"
_execute_allocator(cmd, lvms[last_vm], lsessions[last_vm],
(mem / 200 * 50 * perf_ratio))
logging.debug(utils_test.get_memory_info([lvms[last_vm]]))
lsessions[last_vm].cmd_output("die()", 20)
lvms[last_vm].destroy(gracefully=False)
logging.info("Phase 3b: PASS")
def split_parallel():
"""
        Parallel page splitting
        """
        logging.info("Phase 1: parallel page splitting")
# We have to wait until allocator is finished (it waits 5 seconds to
        # clean the socket)
session = lsessions[0]
vm = lvms[0]
for i in range(1, max_alloc):
lsessions.append(vm.wait_for_login(timeout=360))
session.cmd("swapoff -a", timeout=300)
for i in range(0, max_alloc):
# Start the allocator
_start_allocator(vm, lsessions[i], 60 * perf_ratio)
logging.info("Phase 1: PASS")
logging.info("Phase 2a: Simultaneous merging")
logging.debug("Memory used by allocator on guests = %dMB",
(ksm_size / max_alloc))
for i in range(0, max_alloc):
cmd = "mem = MemFill(%d, %s, %s)" % ((ksm_size / max_alloc),
skeys[i], dkeys[i])
_execute_allocator(cmd, vm, lsessions[i], 60 * perf_ratio)
cmd = "mem.value_fill(%d)" % (skeys[0])
_execute_allocator(cmd, vm, lsessions[i],
fill_base_timeout * perf_ratio)
# Wait until ksm_overcommit_guest.py merges pages (3 * ksm_size / 3)
shm = 0
i = 0
logging.debug("Target shared memory size: %s", ksm_size)
while (shm < ksm_size):
if i > 64:
logging.debug(utils_test.get_memory_info(lvms))
raise error.TestError("SHM didn't merge the memory until DL")
pause = ksm_size / 200 * perf_ratio
logging.debug("Waiting %ds before proceed...", pause)
time.sleep(pause)
if (new_ksm):
shm = get_ksmstat()
else:
shm = vm.get_shared_meminfo()
logging.debug("Shared meminfo after attempt %s: %s", i, shm)
i += 1
logging.debug(utils_test.get_memory_info([vm]))
logging.info("Phase 2a: PASS")
logging.info("Phase 2b: Simultaneous spliting")
# Actual splitting
for i in range(0, max_alloc):
cmd = "mem.static_random_fill()"
data = _execute_allocator(cmd, vm, lsessions[i],
fill_base_timeout * perf_ratio)[1]
data = data.splitlines()[-1]
logging.debug(data)
out = int(data.split()[4])
logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
(ksm_size / max_alloc), out,
(ksm_size * 1000 / out / max_alloc))
logging.debug(utils_test.get_memory_info([vm]))
logging.info("Phase 2b: PASS")
logging.info("Phase 2c: Simultaneous verification")
for i in range(0, max_alloc):
cmd = "mem.static_random_verify()"
data = _execute_allocator(cmd, vm, lsessions[i],
(mem / 200 * 50 * perf_ratio))[1]
logging.info("Phase 2c: PASS")
logging.info("Phase 2d: Simultaneous merging")
        # Actual merging (refill with the same value)
for i in range(0, max_alloc):
cmd = "mem.value_fill(%d)" % skeys[0]
data = _execute_allocator(cmd, vm, lsessions[i],
fill_base_timeout * 2 * perf_ratio)[1]
logging.debug(utils_test.get_memory_info([vm]))
logging.info("Phase 2d: PASS")
logging.info("Phase 2e: Simultaneous verification")
for i in range(0, max_alloc):
cmd = "mem.value_check(%d)" % skeys[0]
data = _execute_allocator(cmd, vm, lsessions[i],
(mem / 200 * 50 * perf_ratio))[1]
logging.info("Phase 2e: PASS")
logging.info("Phase 2f: Simultaneous spliting last 96B")
for i in range(0, max_alloc):
cmd = "mem.static_random_fill(96)"
data = _execute_allocator(cmd, vm, lsessions[i],
fill_base_timeout * perf_ratio)[1]
data = data.splitlines()[-1]
out = int(data.split()[4])
logging.debug("Performance: %dMB * 1000 / %dms = %dMB/s",
ksm_size / max_alloc, out,
(ksm_size * 1000 / out / max_alloc))
logging.debug(utils_test.get_memory_info([vm]))
logging.info("Phase 2f: PASS")
logging.info("Phase 2g: Simultaneous verification last 96B")
for i in range(0, max_alloc):
cmd = "mem.static_random_verify(96)"
_, data = _execute_allocator(cmd, vm, lsessions[i],
(mem / 200 * 50 * perf_ratio))
logging.debug(utils_test.get_memory_info([vm]))
logging.info("Phase 2g: PASS")
logging.debug("Cleaning up...")
for i in range(0, max_alloc):
lsessions[i].cmd_output("die()", 20)
session.close()
vm.destroy(gracefully=False)
# Main test code
logging.info("Starting phase 0: Initialization")
if utils.run("ps -C ksmtuned", ignore_status=True).exit_status == 0:
logging.info("Killing ksmtuned...")
utils.run("killall ksmtuned")
new_ksm = False
if (os.path.exists("/sys/kernel/mm/ksm/run")):
utils.run("echo 50 > /sys/kernel/mm/ksm/sleep_millisecs")
utils.run("echo 5000 > /sys/kernel/mm/ksm/pages_to_scan")
utils.run("echo 1 > /sys/kernel/mm/ksm/run")
e_up = "/sys/kernel/mm/transparent_hugepage/enabled"
e_rh = "/sys/kernel/mm/redhat_transparent_hugepage/enabled"
if os.path.exists(e_up):
utils.run("echo 'never' > %s" % e_up)
if os.path.exists(e_rh):
utils.run("echo 'never' > %s" % e_rh)
new_ksm = True
else:
try:
utils.run("modprobe ksm")
utils.run("ksmctl start 5000 100")
except error.CmdError, details:
raise error.TestFail("Failed to load KSM: %s" % details)
# host_reserve: mem reserve kept for the host system to run
host_reserve = int(params.get("ksm_host_reserve", -1))
if (host_reserve == -1):
        # default host_reserve = memory currently in use + one_minimal_guest(128MB)
# later we add 64MB per additional guest
host_reserve = ((utils_memory.memtotal()
- utils_memory.read_from_meminfo("MemFree"))
/ 1024 + 128)
# using default reserve
_host_reserve = True
else:
_host_reserve = False
# guest_reserve: mem reserve kept to avoid guest OS to kill processes
guest_reserve = int(params.get("ksm_guest_reserve", -1))
if (guest_reserve == -1):
# default guest_reserve = minimal_system_mem(256MB)
# later we add tmpfs overhead
guest_reserve = 256
# using default reserve
_guest_reserve = True
else:
_guest_reserve = False
max_vms = int(params.get("max_vms", 2))
overcommit = float(params.get("ksm_overcommit_ratio", 2.0))
max_alloc = int(params.get("ksm_parallel_ratio", 1))
# vmsc: count of all used VMs
vmsc = int(overcommit) + 1
vmsc = max(vmsc, max_vms)
if (params['ksm_mode'] == "serial"):
max_alloc = vmsc
if _host_reserve:
# First round of additional guest reserves
host_reserve += vmsc * 64
_host_reserve = vmsc
host_mem = (int(utils_memory.memtotal()) / 1024 - host_reserve)
ksm_swap = False
if params.get("ksm_swap") == "yes":
ksm_swap = True
# Performance ratio
perf_ratio = params.get("ksm_perf_ratio")
if perf_ratio:
perf_ratio = float(perf_ratio)
else:
perf_ratio = 1
if (params['ksm_mode'] == "parallel"):
vmsc = 1
overcommit = 1
mem = host_mem
# 32bit system adjustment
if "64" not in params.get("vm_arch_name"):
logging.debug("Probably i386 guest architecture, "
"max allocator mem = 2G")
# Guest can have more than 2G but
# kvm mem + 1MB (allocator itself) can't
if (host_mem > 3100):
mem = 3100
if os.popen("uname -i").readline().startswith("i386"):
logging.debug("Host is i386 architecture, max guest mem is 2G")
# Guest system with qemu overhead (64M) can't have more than 2G
if mem > 3100 - 64:
mem = 3100 - 64
else:
# mem: Memory of the guest systems. Maximum must be less than
# host's physical ram
mem = int(overcommit * host_mem / vmsc)
# 32bit system adjustment
if not params['image_name'].endswith("64"):
logging.debug("Probably i386 guest architecture, "
"max allocator mem = 2G")
# Guest can have more than 2G but
# kvm mem + 1MB (allocator itself) can't
if mem - guest_reserve - 1 > 3100:
vmsc = int(math.ceil((host_mem * overcommit) /
(3100 + guest_reserve)))
if _host_reserve:
host_reserve += (vmsc - _host_reserve) * 64
host_mem -= (vmsc - _host_reserve) * 64
_host_reserve = vmsc
mem = int(math.floor(host_mem * overcommit / vmsc))
if os.popen("uname -i").readline().startswith("i386"):
logging.debug("Host is i386 architecture, max guest mem is 2G")
# Guest system with qemu overhead (64M) can't have more than 2G
if mem > 3100 - 64:
vmsc = int(math.ceil((host_mem * overcommit) /
(3100 - 64.0)))
if _host_reserve:
host_reserve += (vmsc - _host_reserve) * 64
host_mem -= (vmsc - _host_reserve) * 64
_host_reserve = vmsc
mem = int(math.floor(host_mem * overcommit / vmsc))
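    # Worked example with illustrative numbers (serial mode, before any
    # 32-bit adjustments): overcommit = 2.0 and max_vms = 2 give
    # vmsc = int(2.0) + 1 = 3 guests. With host_mem = 6000 MB each guest gets
    # mem = int(2.0 * 6000 / 3) = 4000 MB, so the guests together are
    # allocated 12000 MB, i.e. twice the usable host memory.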
# 0.055 represents OS + TMPFS additional reserve per guest ram MB
if _guest_reserve:
guest_reserve += math.ceil(mem * 0.055)
swap = int(utils_memory.read_from_meminfo("SwapTotal")) / 1024
logging.debug("Overcommit = %f", overcommit)
logging.debug("True overcommit = %f ", (float(vmsc * mem) /
float(host_mem)))
logging.debug("Host memory = %dM", host_mem)
logging.debug("Guest memory = %dM", mem)
logging.debug("Using swap = %s", ksm_swap)
logging.debug("Swap = %dM", swap)
logging.debug("max_vms = %d", max_vms)
logging.debug("Count of all used VMs = %d", vmsc)
logging.debug("Performance_ratio = %f", perf_ratio)
# Generate unique keys for random series
skeys = []
dkeys = []
for i in range(0, max(vmsc, max_alloc)):
key = random.randrange(0, 255)
while key in skeys:
key = random.randrange(0, 255)
skeys.append(key)
key = random.randrange(0, 999)
while key in dkeys:
key = random.randrange(0, 999)
dkeys.append(key)
logging.debug("skeys: %s", skeys)
logging.debug("dkeys: %s", dkeys)
lvms = []
lsessions = []
# As we don't know the number and memory amount of VMs in advance,
# we need to specify and create them here
vm_name = params["main_vm"]
params['mem'] = mem
params['vms'] = vm_name
# Associate pidfile name
params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(vm_name,
'pid')
if not params.get('extra_params'):
params['extra_params'] = ' '
params['extra_params_' + vm_name] = params.get('extra_params')
params['extra_params_' + vm_name] += (" -pidfile %s" %
(params.get('pid_' + vm_name)))
params['extra_params'] = params.get('extra_params_' + vm_name)
# ksm_size: amount of memory used by allocator
ksm_size = mem - guest_reserve
logging.debug("Memory used by allocator on guests = %dM", ksm_size)
fill_base_timeout = ksm_size / 10
# Creating the first guest
env_process.preprocess_vm(test, params, env, vm_name)
lvms.append(env.get_vm(vm_name))
if not lvms[0]:
raise error.TestError("VM object not found in environment")
if not lvms[0].is_alive():
raise error.TestError("VM seems to be dead; Test requires a living "
"VM")
logging.debug("Booting first guest %s", lvms[0].name)
lsessions.append(lvms[0].wait_for_login(timeout=360))
# Associate vm PID
try:
tmp = open(params.get('pid_' + vm_name), 'r')
params['pid_' + vm_name] = int(tmp.readline())
except Exception:
raise error.TestFail("Could not get PID of %s" % (vm_name))
# Creating other guest systems
for i in range(1, vmsc):
vm_name = "vm" + str(i + 1)
params['pid_' + vm_name] = utils_misc.generate_tmp_file_name(vm_name,
'pid')
params['extra_params_' + vm_name] = params.get('extra_params')
params['extra_params_' + vm_name] += (" -pidfile %s" %
(params.get('pid_' + vm_name)))
params['extra_params'] = params.get('extra_params_' + vm_name)
# Last VM is later used to run more allocators simultaneously
lvms.append(lvms[0].clone(vm_name, params))
env.register_vm(vm_name, lvms[i])
params['vms'] += " " + vm_name
logging.debug("Booting guest %s", lvms[i].name)
lvms[i].create()
if not lvms[i].is_alive():
raise error.TestError("VM %s seems to be dead; Test requires a"
"living VM" % lvms[i].name)
lsessions.append(lvms[i].wait_for_login(timeout=360))
try:
tmp = open(params.get('pid_' + vm_name), 'r')
params['pid_' + vm_name] = int(tmp.readline())
except Exception:
raise error.TestFail("Could not get PID of %s" % (vm_name))
# Let guests rest a little bit :-)
pause = vmsc * 2 * perf_ratio
logging.debug("Waiting %ds before proceed", pause)
time.sleep(vmsc * 2 * perf_ratio)
logging.debug(utils_test.get_memory_info(lvms))
# Copy ksm_overcommit_guest.py into guests
shared_dir = os.path.dirname(data_dir.get_data_dir())
vksmd_src = os.path.join(shared_dir, "scripts", "ksm_overcommit_guest.py")
dst_dir = "/tmp"
for vm in lvms:
vm.copy_files_to(vksmd_src, dst_dir)
logging.info("Phase 0: PASS")
if params['ksm_mode'] == "parallel":
logging.info("Starting KSM test parallel mode")
split_parallel()
logging.info("KSM test parallel mode: PASS")
elif params['ksm_mode'] == "serial":
logging.info("Starting KSM test serial mode")
initialize_guests()
separate_first_guest()
split_guest()
logging.info("KSM test serial mode: PASS")
|
agogear/python-1 | refs/heads/master | JiYouMCC/0023/guestbook/guestbook/commits/models.py | 26 | from django.db import models
class Message(models.Model):
name = models.CharField(max_length=30)
message = models.TextField(max_length=65535)
date = models.DateTimeField()
def __unicode__(self):
return self.name + ':' + self.message[:25]
|
stutivarshney/Bal-Aveksha | refs/heads/master | WebServer/BalAvekshaEnv/lib/python3.5/site-packages/pip/_vendor/progress/__init__.py | 916 | # Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division
from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
from time import time
__version__ = '1.2'
class Infinite(object):
file = stderr
sma_window = 10
def __init__(self, *args, **kwargs):
self.index = 0
self.start_ts = time()
self._ts = self.start_ts
self._dt = deque(maxlen=self.sma_window)
for key, val in kwargs.items():
setattr(self, key, val)
def __getitem__(self, key):
if key.startswith('_'):
return None
return getattr(self, key, None)
@property
def avg(self):
return sum(self._dt) / len(self._dt) if self._dt else 0
@property
def elapsed(self):
return int(time() - self.start_ts)
@property
def elapsed_td(self):
return timedelta(seconds=self.elapsed)
def update(self):
pass
def start(self):
pass
def finish(self):
pass
def next(self, n=1):
if n > 0:
now = time()
dt = (now - self._ts) / n
self._dt.append(dt)
self._ts = now
self.index = self.index + n
self.update()
def iter(self, it):
for x in it:
yield x
self.next()
self.finish()
class Progress(Infinite):
def __init__(self, *args, **kwargs):
super(Progress, self).__init__(*args, **kwargs)
self.max = kwargs.get('max', 100)
@property
def eta(self):
return int(ceil(self.avg * self.remaining))
@property
def eta_td(self):
return timedelta(seconds=self.eta)
@property
def percent(self):
return self.progress * 100
@property
def progress(self):
return min(1, self.index / self.max)
@property
def remaining(self):
return max(self.max - self.index, 0)
def start(self):
self.update()
def goto(self, index):
incr = index - self.index
self.next(incr)
def iter(self, it):
try:
self.max = len(it)
except TypeError:
pass
for x in it:
yield x
self.next()
self.finish()
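# Minimal usage sketch (illustrative, not part of the original package): the
# classes above only track progress state; subclasses are expected to
# override update() to actually render something.
if __name__ == '__main__':
    bar = Progress(max=4)
    for _ in bar.iter(range(4)):
        pass  # bar.index, bar.percent and bar.eta are updated on each step
    assert bar.index == bar.max == 4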
|
dhavalmanjaria/dma-student-information-system | refs/heads/master | assignments/models.py | 1 | from django.db import models
from curriculum.models import Subject
from django.urls import reverse
class Assignment(models.Model):
"""
Represents an assignment given to a class (semester)
"""
title = models.CharField(max_length=255)
subject = models.ForeignKey(Subject)
due_date = models.DateField()
description = models.TextField()
def __str__(self):
return self.title + ", " + self.subject.name + ", due: " + str(self.due_date)
def get_absolute_url(self):
return reverse('view-assignment', args=[self.id])
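# Illustrative ORM usage (assumes an existing Subject instance named `maths`;
# not part of the original module):
#
#   Assignment.objects.create(title='Problem sheet 1', subject=maths,
#                             due_date='2018-01-31',
#                             description='Exercises 1-10')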
|
PhilSk/zulip | refs/heads/master | contrib_bots/bots/github_issues/github_issues.py | 16 | from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from . import github
import json
import os
import requests
import six.moves.configparser
import urllib.error
import urllib.parse
import urllib.request
class IssueHandler(object):
'''
This plugin facilitates sending issues to github, when
an item is prefixed with '@mention-bot'.
It will also write items to the issues stream, as well
as reporting it to github
'''
URL = 'https://api.github.com/repos/{}/{}/issues'
CHARACTER_LIMIT = 70
CONFIG_FILE = '~/.github-auth.conf'
def __init__(self):
self.repo_name = github.get_repo()
self.repo_owner = github.get_repo_owner()
def usage(self):
return '''
This plugin will allow users to flag messages
            as being issues with Zulip by using the prefix '@mention-bot'.
Before running this, make sure to create a stream
called "issues" that your API user can send to.
Also, make sure that the credentials of the github bot have
been typed in correctly, that there is a personal access token
with access to public repositories ONLY,
and that the repository name is entered correctly.
Check ~/.github-auth.conf, and make sure there are
github_repo = <repo_name> (The name of the repo to post to)
github_repo_owner = <repo_owner> (The owner of the repo to post to)
github_username = <username> (The username of the GitHub bot)
github_token = <oauth_token> (The personal access token for the GitHub bot)
'''
def handle_message(self, message, client, state_handler):
original_content = message['content']
original_sender = message['sender_email']
temp_content = 'by {}:'.format(original_sender,)
new_content = temp_content + original_content
# gets the repo url
        url_new = self.URL.format(self.repo_owner, self.repo_name)
# signs into github using the provided username and password
session = github.auth()
issue_title = message['content'].strip()
issue_content = ''
new_issue_title = ''
for part_of_title in issue_title.split():
if len(new_issue_title) < self.CHARACTER_LIMIT:
new_issue_title += '{} '.format(part_of_title)
else:
issue_content += '{} '.format(part_of_title)
new_issue_title = new_issue_title.strip()
issue_content = issue_content.strip()
new_issue_title += '...'
# Creates the issue json, that is transmitted to the github api servers
issue = {
'title': new_issue_title,
'body': '{} **Sent by [{}](https://chat.zulip.org/#) from zulip**'.format(issue_content, original_sender),
'assignee': '',
'milestone': 'none',
'labels': [''],
}
# Sends the HTTP post request
r = session.post(url_new, json.dumps(issue))
if r.ok:
# sends the message onto the 'issues' stream so it can be seen by zulip users
client.send_message(dict(
type='stream',
to='issues',
subject=message['sender_email'],
# Adds a check mark so that the user can verify if it has been sent
content='{} :heavy_check_mark:'.format(new_content),
))
return
# This means that the issue has not been sent
# sends the message onto the 'issues' stream so it can be seen by zulip users
client.send_message(dict(
type='stream',
to='issues',
subject=message['sender_email'],
# Adds a cross so that the user can see that it has failed, and provides a link to a
# google search that can (hopefully) direct them to the error
content='{} :x: Code: [{}](https://www.google.com/search?q=Github HTTP {} Error {})'
.format(new_content, r.status_code, r.status_code, r.content),
))
handler_class = IssueHandler
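# Illustrative ~/.github-auth.conf contents (placeholder values only; the
# exact section layout is defined by the accompanying `github` helper module):
#
#   github_repo = zulip
#   github_repo_owner = zulip
#   github_username = my-github-bot
#   github_token = <personal access token>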
|
KohlsTechnology/ansible | refs/heads/devel | lib/ansible/modules/cloud/amazon/aws_ses_identity.py | 18 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aws_ses_identity
short_description: Manages SES email and domain identity
description:
- This module allows the user to manage verified email and domain identity for SES.
- This covers verifying and removing identities as well as setting up complaint, bounce
and delivery notification settings.
version_added: "2.5"
author: Ed Costello (@orthanc)
options:
identity:
description:
- This is the email address or domain to verify / delete.
- If this contains an '@' then it will be considered an email. Otherwise it will be considered a domain.
required: true
state:
description: Whether to create(or update) or delete the identity.
default: present
choices: [ 'present', 'absent' ]
bounce_notifications:
description:
- Setup the SNS topic used to report bounce notifications.
- If omitted, bounce notifications will not be delivered to a SNS topic.
- If bounce notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
complaint_notifications:
description:
- Setup the SNS topic used to report complaint notifications.
- If omitted, complaint notifications will not be delivered to a SNS topic.
- If complaint notifications are not delivered to a SNS topic, I(feedback_forwarding) must be enabled.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
delivery_notifications:
description:
- Setup the SNS topic used to report delivery notifications.
- If omitted, delivery notifications will not be delivered to a SNS topic.
suboptions:
topic:
description:
- The ARN of the topic to send notifications to.
- If omitted, notifications will not be delivered to a SNS topic.
include_headers:
description:
- Whether or not to include headers when delivering to the SNS topic.
- If I(topic) is not specified this will have no impact, but the SES setting is updated even if there is no topic.
type: bool
default: No
feedback_forwarding:
description:
- Whether or not to enable feedback forwarding.
- This can only be false if both I(bounce_notifications) and I(complaint_notifications) specify SNS topics.
type: 'bool'
default: True
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Ensure [email protected] email identity exists
aws_ses_identity:
identity: [email protected]
state: present
- name: Delete [email protected] email identity
aws_ses_identity:
    identity: [email protected]
state: absent
- name: Ensure example.com domain identity exists
aws_ses_identity:
identity: example.com
state: present
# Create an SNS topic and send bounce and complaint notifications to it
# instead of emailing the identity owner
- name: Ensure complaints-topic exists
sns_topic:
name: "complaints-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Deliver feedback to topic instead of owner email
  aws_ses_identity:
identity: [email protected]
state: present
complaint_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: True
bounce_notifications:
topic: "{{ topic_info.sns_arn }}"
include_headers: False
feedback_forwarding: False
# Create an SNS topic for delivery notifications and leave complaints
# Being forwarded to the identity owner email
- name: Ensure delivery-notifications-topic exists
sns_topic:
name: "delivery-notifications-topic"
state: present
purge_subscriptions: False
register: topic_info
- name: Deliver notifications to topic
  aws_ses_identity:
identity: [email protected]
state: present
delivery_notifications:
topic: "{{ topic_info.sns_arn }}"
'''
RETURN = '''
identity:
description: The identity being modified.
returned: success
type: string
sample: [email protected]
identity_arn:
description: The arn of the identity being modified.
returned: success
type: string
sample: arn:aws:ses:us-east-1:12345678:identity/[email protected]
verification_attributes:
description: The verification information for the identity.
returned: success
type: complex
sample: {
"verification_status": "Pending",
"verification_token": "...."
}
contains:
verification_status:
description: The verification status of the identity.
type: string
sample: "Pending"
verification_token:
description: The verification token for a domain identity.
type: string
notification_attributes:
description: The notification setup for the identity.
returned: success
type: complex
sample: {
"bounce_topic": "arn:aws:sns:....",
"complaint_topic": "arn:aws:sns:....",
"delivery_topic": "arn:aws:sns:....",
"forwarding_enabled": false,
"headers_in_bounce_notifications_enabled": true,
"headers_in_complaint_notifications_enabled": true,
"headers_in_delivery_notifications_enabled": true
}
contains:
bounce_topic:
description:
- The ARN of the topic bounce notifications are delivered to.
- Omitted if bounce notifications are not delivered to a topic.
type: string
complaint_topic:
description:
- The ARN of the topic complaint notifications are delivered to.
- Omitted if complaint notifications are not delivered to a topic.
type: string
delivery_topic:
description:
- The ARN of the topic delivery notifications are delivered to.
- Omitted if delivery notifications are not delivered to a topic.
type: string
forwarding_enabled:
description: Whether or not feedback forwarding is enabled.
type: bool
headers_in_bounce_notifications_enabled:
description: Whether or not headers are included in messages delivered to the bounce topic.
type: bool
headers_in_complaint_notifications_enabled:
description: Whether or not headers are included in messages delivered to the complaint topic.
type: bool
headers_in_delivery_notifications_enabled:
description: Whether or not headers are included in messages delivered to the delivery topic.
type: bool
error:
description: The details of the error response from AWS.
returned: on client error from AWS
type: complex
sample: {
"code": "InvalidParameterValue",
"message": "Feedback notification topic is not set.",
"type": "Sender"
}
contains:
code:
description: The AWS error code.
type: string
message:
description: The AWS error message.
type: string
type:
description: The AWS error type.
type: string
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import HAS_BOTO3
import time
import traceback
try:
from botocore.exceptions import BotoCoreError, ClientError
from botocore.config import Config
except ImportError:
pass # caught by imported HAS_BOTO3
def call_and_handle_errors(module, method, **kwargs):
try:
return method(**kwargs)
except ClientError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except BotoCoreError as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
def get_verification_attributes(connection, module, identity, retries=0, retryDelay=10):
# Unpredictably get_identity_verification_attributes doesn't include the identity even when we've
# just registered it. Suspect this is an eventual consistency issue on AWS side.
    # Don't want this complexity exposed to users of the module as they'd have to retry to ensure
# a consistent return from the module.
# To avoid this we have an internal retry that we use only after registering the identity.
for attempt in range(0, retries + 1):
response = call_and_handle_errors(module, connection.get_identity_verification_attributes, Identities=[identity])
identity_verification = response['VerificationAttributes']
if identity in identity_verification:
break
time.sleep(retryDelay)
if identity not in identity_verification:
return None
return identity_verification[identity]
def get_identity_notifications(connection, module, identity, retries=0, retryDelay=10):
# Unpredictably get_identity_notifications doesn't include the notifications when we've
# just registered the identity.
    # Don't want this complexity exposed to users of the module as they'd have to retry to ensure
# a consistent return from the module.
# To avoid this we have an internal retry that we use only when getting the current notification
# status for return.
for attempt in range(0, retries + 1):
response = call_and_handle_errors(module, connection.get_identity_notification_attributes, Identities=[identity])
notification_attributes = response['NotificationAttributes']
        # No clear AWS docs on when this happens, but it appears sometimes identities are not included
# in the notification attributes when the identity is first registered. Suspect that this is caused by
# eventual consistency within the AWS services. It's been observed in builds so we need to handle it.
#
# When this occurs, just return None and we'll assume no identity notification settings have been changed
# from the default which is reasonable if this is just eventual consistency on creation.
# See: https://github.com/ansible/ansible/issues/36065
if identity in notification_attributes:
break
else:
# Paranoia check for coding errors, we only requested one identity, so if we get a different one
# something has gone very wrong.
if len(notification_attributes) != 0:
module.fail_json(
msg='Unexpected identity found in notification attributes, expected {0} but got {1!r}.'.format(
identity,
notification_attributes.keys(),
)
)
time.sleep(retryDelay)
if identity not in notification_attributes:
return None
return notification_attributes[identity]
def update_notification_topic(connection, module, identity, identity_notifications, notification_type):
arg_dict = module.params.get(notification_type.lower() + '_notifications')
topic_key = notification_type + 'Topic'
if identity_notifications is None:
# If there is no configuration for notifications cannot be being sent to topics
# hence assume None as the current state.
current = None
elif topic_key in identity_notifications:
current = identity_notifications[topic_key]
else:
# If there is information on the notifications setup but no information on the
# particular notification topic it's pretty safe to assume there's no topic for
# this notification. AWS API docs suggest this information will always be
# included but best to be defensive
current = None
if arg_dict is not None and 'topic' in arg_dict:
required = arg_dict['topic']
else:
required = None
if current != required:
call_and_handle_errors(
module,
connection.set_identity_notification_topic,
Identity=identity,
NotificationType=notification_type,
SnsTopic=required,
)
return True
return False
def update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type):
arg_dict = module.params.get(notification_type.lower() + '_notifications')
header_key = 'HeadersIn' + notification_type + 'NotificationsEnabled'
if identity_notifications is None:
# If there is no configuration for topic notifications, headers cannot be being
# forwarded, hence assume false.
current = False
elif header_key in identity_notifications:
current = identity_notifications[header_key]
else:
# AWS API doc indicates that the headers in fields are optional. Unfortunately
# it's not clear on what this means. But it's a pretty safe assumption that it means
# headers are not included since most API consumers would interpret absence as false.
current = False
if arg_dict is not None and 'include_headers' in arg_dict:
required = arg_dict['include_headers']
else:
required = False
if current != required:
call_and_handle_errors(
module,
connection.set_identity_headers_in_notifications_enabled,
Identity=identity,
NotificationType=notification_type,
Enabled=required,
)
return True
return False
def update_feedback_forwarding(connection, module, identity, identity_notifications):
if identity_notifications is None:
# AWS requires feedback forwarding to be enabled unless bounces and complaints
# are being handled by SNS topics. So in the absence of identity_notifications
# information existing feedback forwarding must be on.
current = True
elif 'ForwardingEnabled' in identity_notifications:
current = identity_notifications['ForwardingEnabled']
else:
# If there is information on the notifications setup but no information on the
# forwarding state it's pretty safe to assume forwarding is off. AWS API docs
# suggest this information will always be included but best to be defensive
current = False
required = module.params.get('feedback_forwarding')
if current != required:
call_and_handle_errors(
module,
connection.set_identity_feedback_forwarding_enabled,
Identity=identity,
ForwardingEnabled=required,
)
return True
return False
def update_identity_notifications(connection, module):
identity = module.params.get('identity')
changed = False
identity_notifications = get_identity_notifications(connection, module, identity)
for notification_type in ('Bounce', 'Complaint', 'Delivery'):
changed |= update_notification_topic(connection, module, identity, identity_notifications, notification_type)
changed |= update_notification_topic_headers(connection, module, identity, identity_notifications, notification_type)
changed |= update_feedback_forwarding(connection, module, identity, identity_notifications)
if changed or identity_notifications is None:
identity_notifications = get_identity_notifications(connection, module, identity, retries=4)
return changed, identity_notifications
def create_or_update_identity(connection, module, region, account_id):
identity = module.params.get('identity')
changed = False
verification_attributes = get_verification_attributes(connection, module, identity)
if verification_attributes is None:
if '@' in identity:
call_and_handle_errors(module, connection.verify_email_identity, EmailAddress=identity)
else:
call_and_handle_errors(module, connection.verify_domain_identity, Domain=identity)
verification_attributes = get_verification_attributes(connection, module, identity, retries=4)
changed = True
elif verification_attributes['VerificationStatus'] not in ('Pending', 'Success'):
module.fail_json(msg="Identity " + identity + " in bad status " + verification_attributes['VerificationStatus'],
verification_attributes=camel_dict_to_snake_dict(verification_attributes))
if verification_attributes is None:
module.fail_json(msg='Unable to load identity verification attributes after registering identity.')
notifications_changed, notification_attributes = update_identity_notifications(connection, module)
changed |= notifications_changed
if notification_attributes is None:
module.fail_json(msg='Unable to load identity notification attributes.')
identity_arn = 'arn:aws:ses:' + region + ':' + account_id + ':identity/' + identity
module.exit_json(
changed=changed,
identity=identity,
identity_arn=identity_arn,
verification_attributes=camel_dict_to_snake_dict(verification_attributes),
notification_attributes=camel_dict_to_snake_dict(notification_attributes),
)
def destroy_identity(connection, module):
identity = module.params.get('identity')
changed = False
verification_attributes = get_verification_attributes(connection, module, identity)
if verification_attributes is not None:
call_and_handle_errors(module, connection.delete_identity, Identity=identity)
changed = True
module.exit_json(
changed=changed,
identity=identity,
)
def get_account_id(sts):
caller_identity = sts.get_caller_identity()
return caller_identity['Account']
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
identity=dict(required=True, type='str'),
state=dict(default='present', choices=['present', 'absent']),
bounce_notifications=dict(type='dict'),
complaint_notifications=dict(type='dict'),
delivery_notifications=dict(type='dict'),
feedback_forwarding=dict(default=True, type='bool'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
for notification_type in ('bounce', 'complaint', 'delivery'):
param_name = notification_type + '_notifications'
arg_dict = module.params.get(param_name)
if arg_dict:
extra_keys = [x for x in arg_dict.keys() if x not in ('topic', 'include_headers')]
if extra_keys:
module.fail_json(msg='Unexpected keys ' + str(extra_keys) + ' in ' + param_name + ' valid keys are topic or include_headers')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
# Allow up to 10 attempts to call the SES APIs before giving up (9 retries).
# SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
# Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
# the ansible build runs multiple instances of the test in parallel.
# As a result there are build failures due to throttling that exceeds boto's default retries.
# The back-off is exponential, so upping the retry attempts allows multiple parallel runs
# to succeed.
boto_core_config = Config(retries={'max_attempts': 9})
connection = boto3_conn(module, conn_type='client', resource='ses', region=region, endpoint=ec2_url, config=boto_core_config, **aws_connect_params)
state = module.params.get("state")
if state == 'present':
sts = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_params)
account_id = get_account_id(sts)
create_or_update_identity(connection, module, region, account_id)
else:
destroy_identity(connection, module)
if __name__ == '__main__':
main()
|
zalando/lizzy-client | refs/heads/master | tests/test_token.py | 1 | from requests import Response
from tokens import InvalidCredentialsError
import json
import pytest
from lizzy_client.token import get_token
class FakeResponse(Response):
def __init__(self, status_code, text):
"""
:type status_code: int
        :type text: str
"""
self.status_code = status_code
self.reason = 'REASON NOT SET IN MOCK'
self._content = text
def json(self):
return json.loads(self.content)
def test_get_token(monkeypatch):
monkeypatch.setattr('os.environ', {'OAUTH2_ACCESS_TOKENS': 'lizzy=4CCE5570K3N'})
access_token = get_token('https://token.example', scopes='scope', credentials_dir='/meta/credentials')
assert access_token == '4CCE5570K3N'
|
laboiteproject/laboite-backend | refs/heads/master | boites/migrations/0014_auto_20180527_2112.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-27 19:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('boites', '0013_auto_20171211_1135'),
]
operations = [
migrations.AddField(
model_name='boite',
name='screen',
field=models.PositiveSmallIntegerField(choices=[(1, 'Écran monochrome 32×16'), (2, 'Écran bicolore 32×16')], default=1, help_text="Veuillez sélectionner l'écran qui compose votre boîte", verbose_name="Type d'écran"),
),
migrations.AlterField(
model_name='boite',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL, verbose_name='Utilisateur'),
),
migrations.AlterField(
model_name='pushbutton',
name='boite',
field=models.OneToOneField(on_delete=django.db.models.deletion.DO_NOTHING, to='boites.Boite', verbose_name='Boîte'),
),
migrations.AlterField(
model_name='tile',
name='duration',
field=models.PositiveSmallIntegerField(default=5000, help_text='Veuillez saisir une durée durant laquelle la tuile sera affichée (en millisecondes)', verbose_name="Durée d'affichage de la tuile"),
),
]
|
SUSE/azure-sdk-for-python | refs/heads/master | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/probe.py | 2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class Probe(SubResource):
"""A load balancer probe.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar load_balancing_rules: The load balancer rules that use this probe.
:vartype load_balancing_rules: list of :class:`SubResource
<azure.mgmt.network.v2016_09_01.models.SubResource>`
:param protocol: The protocol of the end point. Possible values are:
'Http' or 'Tcp'. If 'Tcp' is specified, a received ACK is required for the
probe to be successful. If 'Http' is specified, a 200 OK response from the
     specified URI is required for the probe to be successful. Possible values
include: 'Http', 'Tcp'
:type protocol: str or :class:`ProbeProtocol
<azure.mgmt.network.v2016_09_01.models.ProbeProtocol>`
:param port: The port for communicating the probe. Possible values range
from 1 to 65535, inclusive.
:type port: int
:param interval_in_seconds: The interval, in seconds, for how frequently
to probe the endpoint for health status. Typically, the interval is
slightly less than half the allocated timeout period (in seconds) which
allows two full probes before taking the instance out of rotation. The
default value is 15, the minimum value is 5.
:type interval_in_seconds: int
    :param number_of_probes: The number of consecutive probes with no
     response that will result in stopping further traffic from being
     delivered to the endpoint. This value allows endpoints to be taken out
     of rotation faster or slower than the typical times used in Azure.
:type number_of_probes: int
:param request_path: The URI used for requesting health status from the
VM. Path is required if a protocol is set to http. Otherwise, it is not
allowed. There is no default value.
:type request_path: str
    :param provisioning_state: Gets the provisioning state of the probe
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'load_balancing_rules': {'readonly': True},
'protocol': {'required': True},
'port': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'port': {'key': 'properties.port', 'type': 'int'},
'interval_in_seconds': {'key': 'properties.intervalInSeconds', 'type': 'int'},
'number_of_probes': {'key': 'properties.numberOfProbes', 'type': 'int'},
'request_path': {'key': 'properties.requestPath', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, protocol, port, id=None, interval_in_seconds=None, number_of_probes=None, request_path=None, provisioning_state=None, name=None, etag=None):
super(Probe, self).__init__(id=id)
self.load_balancing_rules = None
self.protocol = protocol
self.port = port
self.interval_in_seconds = interval_in_seconds
self.number_of_probes = number_of_probes
self.request_path = request_path
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
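# Illustrative construction (not part of the generated client code): only
# `protocol` and `port` are required, the remaining attributes are optional
# keyword arguments.
#
#   probe = Probe(protocol='Tcp', port=80,
#                 interval_in_seconds=15, number_of_probes=2)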
|
luiseduardohdbackup/odoo | refs/heads/8.0 | addons/stock/report/report_stock.py | 376 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.sql import drop_view_if_exists
class report_stock_lines_date(osv.osv):
_name = "report.stock.lines.date"
_description = "Dates of Inventories and latest Moves"
_auto = False
_order = "date"
_columns = {
'id': fields.integer('Product Id', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True, select=True),
'date': fields.datetime('Date of latest Inventory', readonly=True),
'move_date': fields.datetime('Date of latest Stock Move', readonly=True),
"active": fields.boolean("Active", readonly=True),
}
def init(self, cr):
drop_view_if_exists(cr, 'report_stock_lines_date')
cr.execute("""
create or replace view report_stock_lines_date as (
select
p.id as id,
p.id as product_id,
max(s.date) as date,
max(m.date) as move_date,
p.active as active
from
product_product p
left join (
stock_inventory_line l
inner join stock_inventory s on (l.inventory_id=s.id and s.state = 'done')
) on (p.id=l.product_id)
left join stock_move m on (m.product_id=p.id and m.state = 'done')
group by p.id
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
madewokherd/urk | refs/heads/master | scripts/theme.py | 1 | import time
import windows
import widgets
import chaninfo
import events
from conf import conf
textareas = {}
if 'font' in conf:
textareas['font'] = conf['font']
if 'bg_color' in conf:
textareas['bg'] = conf['bg_color']
if 'fg_color' in conf:
textareas['fg'] = conf['fg_color']
widgets.set_style("view", textareas)
widgets.set_style("nicklist", textareas)
#copied pretty directly from something that was probably copied from wine sources
def RGBtoHSL(r, g, b):
maxval = max(r, g, b)
minval = min(r, g, b)
luminosity = ((maxval + minval) * 240 + 255) // 510
if maxval == minval:
saturation = 0
hue = 160
else:
delta = maxval - minval
if luminosity <= 120:
saturation = ((maxval+minval)//2 + delta*240) // (maxval + minval)
else:
            saturation = ((510-maxval-minval)//2 + delta*240) // (510-maxval-minval)
#sigh..
rnorm = (delta//2 + maxval*40 - r*40)//delta
gnorm = (delta//2 + maxval*40 - g*40)//delta
bnorm = (delta//2 + maxval*40 - b*40)//delta
if r == maxval:
hue = bnorm-gnorm
elif g == maxval:
hue = 80+rnorm-bnorm
else:
hue = 160+gnorm-rnorm
hue = hue % 240
return hue, saturation, luminosity
#copied from the same place
def huetoRGB(hue, mid1, mid2):
hue = hue % 240
if hue > 160:
return mid1
elif hue > 120:
hue = 160 - hue
elif hue > 40:
return mid2
return ((hue * (mid2 - mid1) + 20) // 40) + mid1
#this too
def HSLtoRGB(hue, saturation, luminosity):
if saturation != 0:
if luminosity > 120:
mid2 = saturation + luminosity - (saturation * luminosity + 120)//240
else:
mid2 = ((saturation + 240) * luminosity + 120)//240
mid1 = luminosity * 2 - mid2
return tuple((huetoRGB(hue+x, mid1, mid2) * 255 + 120) // 240 for x in (80,0,-80))
else:
value = luminosity * 255 // 240
return value, value, value
def gethashcolor(string):
h = hash(string)
rgb = HSLtoRGB(h%241, 100-h//241%61, 90)
return "%02x%02x%02x" % rgb
#take an event e and trigger the highlight event if necessary
def hilight_text(e):
if not hasattr(e, 'Highlight'):
e.Highlight = []
events.trigger('Highlight', e)
#hilight own nick
def onHighlight(e):
lowertext = e.text.lower()
for word in conf.get('highlight_words', []) + [e.network.me] + e.network.nicks:
lowerword = word.lower()
pos = lowertext.find(lowerword, 0)
while pos != -1:
e.Highlight.append((pos, pos+len(word)))
pos = lowertext.find(lowerword, pos+1)
def prefix(e):
return time.strftime(conf.get('timestamp', ''))
def getsourcecolor(e):
address = getattr(e, "address", "")
if address:
if e.network.me == e.source:
e.network._my_address = address
elif e.network.me == e.source:
address = getattr(e.network, "_my_address", "")
if '@' in address:
address = address.split('@')[1]
if not address:
address = e.source
return "\x04%s" % gethashcolor(address)
def format_source(e):
highlight = getattr(e, "Highlight", "") and '\x02' or ''
return "%s\x04%s%s" % (highlight, getsourcecolor(e), e.source)
def format_info_source(e):
if e.source == e.network.me:
return "\x04%sYou" % (getsourcecolor(e))
else:
return "\x04%s%s" % (getsourcecolor(e), e.source)
def address(e):
#if e.source != e.network.me:
# return "%s " % info_in_brackets(e.address)
#else:
# return ""
return ""
def text(e):
if e.text:
#return " %s" % info_in_brackets(e.text)
return ": \x0F%s" % e.text
else:
return ""
def info_in_brackets(text):
return "(\x044881b6%s\x0F)" % text
def pretty_time(secs):
times = (
#("years", "year", 31556952),
("weeks", "week", 604800),
("days", "day", 86400),
("hours", "hour", 3600),
("minutes", "minute", 60),
("seconds", "second", 1),
)
if secs == 0:
return "0 seconds"
result = ""
for plural, singular, amount in times:
n, secs = divmod(secs, amount)
if n == 1:
result = result + " %s %s" % (n, singular)
elif n:
result = result + " %s %s" % (n, plural)
return result[1:]
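# e.g. pretty_time(3661) -> "1 hour 1 minute 1 second"
#      pretty_time(0) -> "0 seconds"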
def onText(e):
hilight_text(e)
color = getsourcecolor(e)
to_write = prefix(e)
if e.network.me == e.target: # this is a pm
if e.window.id == e.network.norm_case(e.source):
to_write += "\x02<\x0F%s\x0F\x02>\x0F " % (format_source(e))
else:
to_write += "\x02*\x0F%s\x0F\x02*\x0F " % (format_source(e))
else:
if e.window.id == e.network.norm_case(e.target):
to_write += "\x02<\x0F%s\x0F\x02>\x0F " % (format_source(e))
else:
to_write += "\x02<\x0F%s:%s\x0F\x02>\x0F " % (format_source(e), e.target)
to_write += e.text
if e.Highlight:
e.window.write(to_write, widgets.HILIT)
else:
e.window.write(to_write, widgets.TEXT)
def onOwnText(e):
color = getsourcecolor(e)
to_write = prefix(e)
if e.window.id == e.network.norm_case(e.target):
to_write += "\x02<\x0F%s\x0F\x02>\x0F %s" % (format_source(e), e.text)
else:
to_write += "%s->\x0F \x02*\x0F%s\x0F\x02*\x0F %s" % (color, e.target, e.text)
e.window.write(to_write)
def onAction(e):
hilight_text(e)
    color = getsourcecolor(e)
to_write = "%s\x02*\x0F %s\x0F %s" % (prefix(e), format_source(e), e.text)
if e.Highlight:
e.window.write(to_write, widgets.HILIT)
else:
e.window.write(to_write, widgets.TEXT)
def onOwnAction(e):
color = getsourcecolor(e)
to_write = "%s\x02*\x0F %s\x0F %s" % (prefix(e), format_source(e), e.text)
e.window.write(to_write)
def onNotice(e):
hilight_text(e)
color = getsourcecolor(e)
to_write = prefix(e)
if e.network.me == e.target: # this is a pm
to_write += "\x02-\x0F%s\x0F\x02-\x0F " % (format_source(e))
else:
to_write += "\x02-\x0F%s:%s\x0F\x02-\x0F " % (format_source(e), e.target)
to_write += e.text
e.window.write(to_write, (e.Highlight and widgets.HILIT) or widgets.TEXT)
def onOwnNotice(e):
color = getsourcecolor(e)
to_write = "%s-> \x02-\x02%s\x0F\x02-\x0F %s" % (prefix(e), e.target, e.text)
e.window.write(to_write)
def onCtcp(e):
color = getsourcecolor(e)
to_write = "%s\x02[\x02%s\x0F\x02]\x0F %s" % (prefix(e), format_source(e), e.text)
if not e.quiet:
e.window.write(to_write)
def onCtcpReply(e):
color = getsourcecolor(e)
to_write = "%s%s--- %s reply from %s:\x0F %s" % (prefix(e), color, e.name.capitalize(), format_source(e), ' '.join(e.args))
window = windows.manager.get_active()
if window.network != e.network:
window = windows.get_default(e.network)
window.write(to_write, widgets.TEXT)
def onJoin(e):
if e.source == e.network.me:
to_write = "%s%s %sjoin %s" % (prefix(e), format_info_source(e), address(e), e.target)
else:
to_write = "%s%s %sjoins %s" % (prefix(e), format_info_source(e), address(e), e.target)
e.window.write(to_write)
def onPart(e):
if e.source == e.network.me:
to_write = "%s%s leave %s%s" % (prefix(e), format_info_source(e), e.target, text(e))
else:
to_write = "%s%s leaves %s%s" % (prefix(e), format_info_source(e), e.target, text(e))
e.window.write(to_write)
def onKick(e):
if e.source == e.network.me:
to_write = "%s%s kick %s%s" % (prefix(e), format_info_source(e), e.target, text(e))
else:
to_write = "%s%s kicks %s%s" % (prefix(e), format_info_source(e), e.target, text(e))
e.window.write(to_write, (e.target == e.network.me and widgets.HILIT) or widgets.EVENT)
def onMode(e):
if e.source == e.network.me:
to_write = "%s%s set mode:\x0F %s" % (prefix(e), format_info_source(e), e.text)
else:
to_write = "%s%s sets mode:\x0F %s" % (prefix(e), format_info_source(e), e.text)
e.window.write(to_write)
def onQuit(e):
to_write = "%s%s leaves%s" % (prefix(e), format_info_source(e), text(e))
for channame in chaninfo.channels(e.network):
if chaninfo.ison(e.network, channame, e.source):
window = windows.get(windows.ChannelWindow, e.network, channame)
if window:
window.write(to_write)
def onNick(e):
color = getsourcecolor(e)
if e.source == e.network.me:
to_write = "%s%sYou are now known as %s" % (prefix(e), color, e.target)
else:
to_write = "%s%s%s is now known as %s" % (prefix(e), color, e.source, e.target)
if e.source == e.network.me:
for window in windows.get_with(network=e.network):
window.write(to_write)
else:
for channame in chaninfo.channels(e.network):
if chaninfo.ison(e.network,channame,e.source):
window = windows.get(windows.ChannelWindow, e.network, channame)
if window:
window.write(to_write)
def onTopic(e):
if e.source == e.network.me:
to_write = "%s%s set topic:\x0F %s" % (prefix(e), format_info_source(e), e.text)
else:
to_write = "%s%s sets topic:\x0F %s" % (prefix(e), format_info_source(e), e.text)
e.window.write(to_write)
def onRaw(e):
if not e.quiet:
if e.msg[1].isdigit():
if e.msg[1] == '332':
window = windows.get(windows.ChannelWindow, e.network, e.msg[3]) or e.window
window.write(
"%sTopic on %s is: %s" %
(prefix(e), e.msg[3], e.text)
)
elif e.msg[1] == '333':
window = windows.get(windows.ChannelWindow, e.network, e.msg[3]) or e.window
window.write(
"%sTopic on %s set by %s at time %s" %
(prefix(e), e.msg[3], e.msg[4], time.ctime(int(e.msg[5])))
)
elif e.msg[1] == '329': #RPL_CREATIONTIME
pass
elif e.msg[1] == '311': #RPL_WHOISUSER
e.window.write("* %s is %s@%s * %s" % (e.msg[3], e.msg[4], e.msg[5], e.msg[7]))
elif e.msg[1] == '312': #RPL_WHOISSERVER
e.window.write("* %s on %s (%s)" % (e.msg[3], e.msg[4], e.msg[5]))
elif e.msg[1] == '317': #RPL_WHOISIDLE
e.window.write("* %s has been idle for %s" % (e.msg[3], pretty_time(int(e.msg[4]))))
if e.msg[5].isdigit():
e.window.write("* %s signed on %s" % (e.msg[3], time.ctime(int(e.msg[5]))))
elif e.msg[1] == '319': #RPL_WHOISCHANNELS
e.window.write("* %s on channels: %s" % (e.msg[3], e.msg[4]))
elif e.msg[1] == '330': #RPL_WHOISACCOUNT
#this appears to conflict with another raw, so if there's anything weird about it,
# we fall back on the default
if len(e.msg) == 6 and not e.msg[4].isdigit() and not e.msg[5].isdigit():
e.window.write("* %s %s %s" % (e.msg[3], e.msg[5], e.msg[4]))
else:
e.window.write("* %s" % ' '.join(e.msg[3:]))
else:
e.window.write("* %s" % ' '.join(e.msg[3:]))
elif e.msg[1] == 'ERROR':
e.window.write("Error: %s" % e.text)
def onDisconnect(e):
to_write = '%s* Disconnected' % prefix(e)
if e.error:
to_write += ' (%s)' % e.error
for window in windows.get_with(network=e.network):
if isinstance(window, windows.StatusWindow):
window.write(to_write, widgets.TEXT)
else:
window.write(to_write, widgets.EVENT)
|
vipul-sharma20/oh-mainline | refs/heads/master | vendor/packages/Django/django/conf/locale/da/formats.py | 106 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', # '25.10.2006'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
|
zeeman/cyder | refs/heads/master | vendor-local/src/django-tastypie/tests/gis/api/resources.py | 58 | from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ALL
from tastypie.contrib.gis.resources import ModelResource
from tastypie.authorization import Authorization
from gis.models import GeoNote
class UserResource(ModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
authorization = Authorization()
class GeoNoteResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
resource_name = 'geonotes'
queryset = GeoNote.objects.all()
authorization = Authorization()
filtering = {
'points': ALL,
'lines': ALL,
'polys': ALL,
}
|
AndrewGrossman/django | refs/heads/master | django/db/migrations/operations/base.py | 356 | from __future__ import unicode_literals
from django.db import router
class Operation(object):
"""
Base class for migration operations.
It's responsible for both mutating the in-memory model state
(see db/migrations/state.py) to represent what it performs, as well
as actually performing it against a live database.
Note that some operations won't modify memory state at all (e.g. data
copying operations), and some will need their modifications to be
optionally specified by the user (e.g. custom Python code snippets)
Due to the way this class deals with deconstruction, it should be
considered immutable.
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
# Can this migration be represented as SQL? (things like RunPython cannot)
reduces_to_sql = True
# Should this operation be forced as atomic even on backends with no
# DDL transaction support (i.e., does it have no DDL, like RunPython)
atomic = False
serialization_expand_args = []
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Returns a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
def state_forwards(self, app_label, state):
"""
Takes the state from the previous migration, and mutates it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError('subclasses of Operation must provide a database_backwards() method')
def describe(self):
"""
Outputs a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
def references_model(self, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
model name (as a string), with an optional app label for accuracy.
Used for optimization. If in doubt, return True;
returning a false positive will merely make the optimizer a little
less efficient, while returning a false negative may result in an
unusable optimized migration.
"""
return True
def references_field(self, model_name, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
field name, with an optional app label for accuracy.
Used for optimization. If in doubt, return True.
"""
return self.references_model(model_name, app_label)
def allow_migrate_model(self, connection_alias, model):
"""
Returns if we're allowed to migrate the model.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.
"""
if not model._meta.can_migrate(connection_alias):
return False
return router.allow_migrate_model(connection_alias, model)
def __repr__(self):
return "<%s %s%s>" % (
self.__class__.__name__,
", ".join(map(repr, self._constructor_args[0])),
",".join(" %s=%r" % x for x in self._constructor_args[1].items()),
)
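# A minimal sketch (illustrative only, not part of Django) of a custom operation
# that satisfies the contract documented above: it touches neither the in-memory
# project state nor the database, so both directions are trivially reversible.
class ExampleNoopOperation(Operation):
    reversible = True
    reduces_to_sql = False
    def state_forwards(self, app_label, state):
        # Nothing to change in the in-memory project state.
        pass
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Nothing to execute against the live database.
        pass
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Reversing a no-op is also a no-op.
        pass
    def describe(self):
        return "Example no-op operation (illustrative only)"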
|
RafaelTorrealba/odoo | refs/heads/8.0 | addons/crm/crm_lead.py | 77 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm
from datetime import datetime
from operator import itemgetter
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.addons.base.res.res_partner import format_address
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
from openerp.tools import email_re, email_split
CRM_LEAD_FIELDS_TO_MERGE = ['name',
'partner_id',
'campaign_id',
'company_id',
'country_id',
'section_id',
'state_id',
'stage_id',
'medium_id',
'source_id',
'user_id',
'title',
'city',
'contact_name',
'description',
'email',
'fax',
'mobile',
'partner_name',
'phone',
'probability',
'planned_revenue',
'street',
'street2',
'zip',
'create_date',
'date_action_last',
'date_action_next',
'email_from',
'email_cc',
'partner_name']
class crm_lead(format_address, osv.osv):
""" CRM Lead Case """
_name = "crm.lead"
_description = "Lead/Opportunity"
_order = "priority desc,date_action,id desc"
_inherit = ['mail.thread', 'ir.needaction_mixin', 'crm.tracking.mixin']
_track = {
'stage_id': {
            # this is only a heuristic; depending on your particular stage configuration it may not match all 'new' stages
'crm.mt_lead_create': lambda self, cr, uid, obj, ctx=None: obj.probability == 0 and obj.stage_id and obj.stage_id.sequence <= 1,
'crm.mt_lead_stage': lambda self, cr, uid, obj, ctx=None: (obj.stage_id and obj.stage_id.sequence > 1) and obj.probability < 100,
'crm.mt_lead_won': lambda self, cr, uid, obj, ctx=None: obj.probability == 100 and obj.stage_id and obj.stage_id.fold,
'crm.mt_lead_lost': lambda self, cr, uid, obj, ctx=None: obj.probability == 0 and obj.stage_id and obj.stage_id.fold and obj.stage_id.sequence > 1,
},
}
_mail_mass_mailing = _('Leads / Opportunities')
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
if context.get('default_type') == 'lead':
context['empty_list_help_model'] = 'crm.case.section'
context['empty_list_help_id'] = context.get('default_section_id')
context['empty_list_help_document_name'] = _("leads")
return super(crm_lead, self).get_empty_list_help(cr, uid, help, context=context)
def _get_default_section_id(self, cr, uid, user_id=False, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, user_id or uid, context).default_section_id.id or False
return section_id
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
section_id = self._get_default_section_id(cr, uid, context=context)
return self.stage_find(cr, uid, [], section_id, [('fold', '=', False)], context=context)
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
def _resolve_type_from_context(self, cr, uid, context=None):
""" Returns the type (lead or opportunity) from the type context
key. Returns None if it cannot be resolved.
"""
if context is None:
context = {}
return context.get('default_type')
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
access_rights_uid = access_rights_uid or uid
stage_obj = self.pool.get('crm.case.stage')
order = stage_obj._order
# lame hack to allow reverting search, should just work in the trivial case
if read_group_order == 'stage_id desc':
order = "%s desc" % order
# retrieve section_id from the context and write the domain
# - ('id', 'in', 'ids'): add columns that should be present
# - OR ('case_default', '=', True), ('fold', '=', False): add default columns that are not folded
# - OR ('section_ids', '=', section_id), ('fold', '=', False) if section_id: add section columns that are not folded
search_domain = []
section_id = self._resolve_section_id_from_context(cr, uid, context=context)
if section_id:
search_domain += ['|', ('section_ids', '=', section_id)]
search_domain += [('id', 'in', ids)]
else:
search_domain += ['|', ('id', 'in', ids), ('case_default', '=', True)]
# retrieve type from the context (if set: choose 'type' or 'both')
type = self._resolve_type_from_context(cr, uid, context=context)
if type:
search_domain += ['|', ('type', '=', type), ('type', '=', 'both')]
# perform search
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context and context.get('opportunity_id'):
action = self.get_formview_action(cr, user, context['opportunity_id'], context=context)
if action.get('views') and any(view_id for view_id in action['views'] if view_id[1] == view_type):
view_id = next(view_id[0] for view_id in action['views'] if view_id[1] == view_type)
res = super(crm_lead, self).fields_view_get(cr, user, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
res['arch'] = self.fields_view_get_address(cr, user, res['arch'], context=context)
return res
_group_by_full = {
'stage_id': _read_group_stage_ids
}
def _compute_day(self, cr, uid, ids, fields, args, context=None):
"""
:return dict: difference between current date and log date
"""
res = {}
for lead in self.browse(cr, uid, ids, context=context):
for field in fields:
res[lead.id] = {}
duration = 0
ans = False
if field == 'day_open':
if lead.date_open:
date_create = datetime.strptime(lead.create_date, "%Y-%m-%d %H:%M:%S")
date_open = datetime.strptime(lead.date_open, "%Y-%m-%d %H:%M:%S")
ans = date_open - date_create
elif field == 'day_close':
if lead.date_closed:
date_create = datetime.strptime(lead.create_date, "%Y-%m-%d %H:%M:%S")
date_close = datetime.strptime(lead.date_closed, "%Y-%m-%d %H:%M:%S")
ans = date_close - date_create
if ans:
duration = abs(int(ans.days))
res[lead.id][field] = duration
return res
def _meeting_count(self, cr, uid, ids, field_name, arg, context=None):
Event = self.pool['calendar.event']
return {
            opp_id: Event.search_count(cr, uid, [('opportunity_id', '=', opp_id)], context=context)
for opp_id in ids
}
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner', ondelete='set null', track_visibility='onchange',
select=True, help="Linked partner (optional). Usually created when converting the lead."),
'id': fields.integer('ID', readonly=True),
'name': fields.char('Subject', required=True, select=1),
'active': fields.boolean('Active', required=False),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'email_from': fields.char('Email', size=128, help="Email address of the contact", select=1),
'section_id': fields.many2one('crm.case.section', 'Sales Team',
select=True, track_visibility='onchange', help='When sending mails, the default email address is taken from the sales team.'),
'create_date': fields.datetime('Creation Date', readonly=True),
'email_cc': fields.text('Global CC', help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'description': fields.text('Notes'),
'write_date': fields.datetime('Update Date', readonly=True),
'categ_ids': fields.many2many('crm.case.categ', 'crm_lead_category_rel', 'lead_id', 'category_id', 'Tags', \
domain="['|', ('section_id', '=', section_id), ('section_id', '=', False), ('object_id.model', '=', 'crm.lead')]", help="Classify and analyze your lead/opportunity categories like: Training, Service"),
'contact_name': fields.char('Contact Name', size=64),
'partner_name': fields.char("Customer Name", size=64,help='The name of the future partner company that will be created while converting the lead into opportunity', select=1),
'opt_out': fields.boolean('Opt-Out', oldname='optout',
help="If opt-out is checked, this contact has refused to receive emails for mass mailing and marketing campaign. "
"Filter 'Available for Mass Mailing' allows users to filter the leads when performing mass mailing."),
'type': fields.selection([ ('lead','Lead'), ('opportunity','Opportunity'), ],'Type', select=True, help="Type is used to separate Leads and Opportunities"),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority', select=True),
'date_closed': fields.datetime('Closed', readonly=True, copy=False),
'stage_id': fields.many2one('crm.case.stage', 'Stage', track_visibility='onchange', select=True,
domain="['&', ('section_ids', '=', section_id), '|', ('type', '=', type), ('type', '=', 'both')]"),
'user_id': fields.many2one('res.users', 'Salesperson', select=True, track_visibility='onchange'),
'referred': fields.char('Referred By'),
'date_open': fields.datetime('Assigned', readonly=True),
'day_open': fields.function(_compute_day, string='Days to Assign',
multi='day_open', type="float",
store={'crm.lead': (lambda self, cr, uid, ids, c={}: ids, ['date_open'], 10)}),
'day_close': fields.function(_compute_day, string='Days to Close',
multi='day_open', type="float",
store={'crm.lead': (lambda self, cr, uid, ids, c={}: ids, ['date_closed'], 10)}),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True),
# Messaging and marketing
'message_bounce': fields.integer('Bounce'),
# Only used for type opportunity
'probability': fields.float('Success Rate (%)', group_operator="avg"),
'planned_revenue': fields.float('Expected Revenue', track_visibility='always'),
'ref': fields.reference('Reference', selection=openerp.addons.base.res.res_request.referencable_models),
'ref2': fields.reference('Reference 2', selection=openerp.addons.base.res.res_request.referencable_models),
'phone': fields.char("Phone", size=64),
'date_deadline': fields.date('Expected Closing', help="Estimate of the date on which the opportunity will be won."),
'date_action': fields.date('Next Action Date', select=True),
'title_action': fields.char('Next Action'),
'color': fields.integer('Color Index'),
'partner_address_name': fields.related('partner_id', 'name', type='char', string='Partner Contact Name', readonly=True),
'partner_address_email': fields.related('partner_id', 'email', type='char', string='Partner Contact Email', readonly=True),
'company_currency': fields.related('company_id', 'currency_id', type='many2one', string='Currency', readonly=True, relation="res.currency"),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'user_login': fields.related('user_id', 'login', type='char', string='User Login', readonly=True),
# Fields for address, due to separation from crm and res.partner
'street': fields.char('Street'),
'street2': fields.char('Street2'),
'zip': fields.char('Zip', change_default=True, size=24),
'city': fields.char('City'),
'state_id': fields.many2one("res.country.state", 'State'),
'country_id': fields.many2one('res.country', 'Country'),
'phone': fields.char('Phone'),
'fax': fields.char('Fax'),
'mobile': fields.char('Mobile'),
'function': fields.char('Function'),
'title': fields.many2one('res.partner.title', 'Title'),
'company_id': fields.many2one('res.company', 'Company', select=1),
'payment_mode': fields.many2one('crm.payment.mode', 'Payment Mode', \
domain="[('section_id','=',section_id)]"),
'planned_cost': fields.float('Planned Costs'),
'meeting_count': fields.function(_meeting_count, string='# Meetings', type='integer'),
}
_defaults = {
'active': 1,
'type': 'lead',
'user_id': lambda s, cr, uid, c: uid,
'stage_id': lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c),
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, context=c),
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.lead', context=c),
'priority': lambda *a: crm.AVAILABLE_PRIORITIES[2][0],
'color': 0,
'date_last_stage_update': fields.datetime.now,
}
_sql_constraints = [
('check_probability', 'check(probability >= 0 and probability <= 100)', 'The probability of closing the deal should be between 0% and 100%!')
]
def onchange_stage_id(self, cr, uid, ids, stage_id, context=None):
if not stage_id:
return {'value': {}}
stage = self.pool.get('crm.case.stage').browse(cr, uid, stage_id, context=context)
if not stage.on_change:
return {'value': {}}
vals = {'probability': stage.probability}
if stage.probability >= 100 or (stage.probability == 0 and stage.sequence > 1):
vals['date_closed'] = fields.datetime.now()
return {'value': vals}
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
partner_name = (partner.parent_id and partner.parent_id.name) or (partner.is_company and partner.name) or False
values = {
'partner_name': partner_name,
'contact_name': (not partner.is_company and partner.name) or False,
'title': partner.title and partner.title.id or False,
'street': partner.street,
'street2': partner.street2,
'city': partner.city,
'state_id': partner.state_id and partner.state_id.id or False,
'country_id': partner.country_id and partner.country_id.id or False,
'email_from': partner.email,
'phone': partner.phone,
'mobile': partner.mobile,
'fax': partner.fax,
'zip': partner.zip,
'function': partner.function,
}
return {'value': values}
def on_change_user(self, cr, uid, ids, user_id, context=None):
""" When changing the user, also set a section_id or restrict section id
to the ones user_id is member of. """
section_id = self._get_default_section_id(cr, uid, user_id=user_id, context=context) or False
if user_id and self.pool['res.users'].has_group(cr, uid, 'base.group_multi_salesteams') and not section_id:
section_ids = self.pool.get('crm.case.section').search(cr, uid, ['|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context)
if section_ids:
section_id = section_ids[0]
return {'value': {'section_id': section_id}}
def stage_find(self, cr, uid, cases, section_id, domain=None, order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- type: stage type must be the same or 'both'
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
if context is None:
context = {}
# check whether we should try to add a condition on type
avoid_add_type_term = any([term for term in domain if len(term) == 3 if term[0] == 'type'])
# collect all section_ids
section_ids = set()
types = ['both']
if not cases and context.get('default_type'):
ctx_type = context.get('default_type')
types += [ctx_type]
if section_id:
section_ids.add(section_id)
for lead in cases:
if lead.section_id:
section_ids.add(lead.section_id.id)
if lead.type not in types:
types.append(lead.type)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * len(section_ids)
for section_id in section_ids:
search_domain.append(('section_ids', '=', section_id))
search_domain.append(('case_default', '=', True))
# AND with cases types
if not avoid_add_type_term:
search_domain.append(('type', 'in', types))
# AND with the domain in parameter
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('crm.case.stage').search(cr, uid, search_domain, order=order, limit=1, context=context)
if stage_ids:
return stage_ids[0]
return False
def case_mark_lost(self, cr, uid, ids, context=None):
""" Mark the case as lost: state=cancel and probability=0
"""
stages_leads = {}
for lead in self.browse(cr, uid, ids, context=context):
stage_id = self.stage_find(cr, uid, [lead], lead.section_id.id or False, [('probability', '=', 0.0), ('on_change', '=', True), ('sequence', '>', 1)], context=context)
if stage_id:
if stages_leads.get(stage_id):
stages_leads[stage_id].append(lead.id)
else:
stages_leads[stage_id] = [lead.id]
else:
raise osv.except_osv(_('Warning!'),
                    _('To relieve your sales pipe and group all Lost opportunities, configure one of your sales stages as follows:\n'
'probability = 0 %, select "Change Probability Automatically".\n'
'Create a specific stage or edit an existing one by editing columns of your opportunity pipe.'))
for stage_id, lead_ids in stages_leads.items():
self.write(cr, uid, lead_ids, {'stage_id': stage_id}, context=context)
return True
def case_mark_won(self, cr, uid, ids, context=None):
""" Mark the case as won: state=done and probability=100
"""
stages_leads = {}
for lead in self.browse(cr, uid, ids, context=context):
stage_id = self.stage_find(cr, uid, [lead], lead.section_id.id or False, [('probability', '=', 100.0), ('on_change', '=', True)], context=context)
if stage_id:
if stages_leads.get(stage_id):
stages_leads[stage_id].append(lead.id)
else:
stages_leads[stage_id] = [lead.id]
else:
raise osv.except_osv(_('Warning!'),
                    _('To relieve your sales pipe and group all Won opportunities, configure one of your sales stages as follows:\n'
'probability = 100 % and select "Change Probability Automatically".\n'
'Create a specific stage or edit an existing one by editing columns of your opportunity pipe.'))
for stage_id, lead_ids in stages_leads.items():
self.write(cr, uid, lead_ids, {'stage_id': stage_id}, context=context)
return True
def case_escalate(self, cr, uid, ids, context=None):
""" Escalates case to parent level """
for case in self.browse(cr, uid, ids, context=context):
data = {'active': True}
if case.section_id.parent_id:
data['section_id'] = case.section_id.parent_id.id
if case.section_id.parent_id.change_responsible:
if case.section_id.parent_id.user_id:
data['user_id'] = case.section_id.parent_id.user_id.id
else:
                raise osv.except_osv(_('Error!'), _("You are already at the top level of your sales-team category.\nTherefore you cannot escalate any further."))
self.write(cr, uid, [case.id], data, context=context)
return True
def _merge_get_result_type(self, cr, uid, opps, context=None):
"""
Define the type of the result of the merge. If at least one of the
        elements to merge is an opp, the resulting new element will be an opp.
Otherwise it will be a lead.
We'll directly use a list of browse records instead of a list of ids
        for performance's sake: it will spare a second browse of the
leads/opps.
:param list opps: list of browse records containing the leads/opps to process
:return string type: the type of the final element
"""
for opp in opps:
if (opp.type == 'opportunity'):
return 'opportunity'
return 'lead'
def _merge_data(self, cr, uid, ids, oldest, fields, context=None):
"""
Prepare lead/opp data into a dictionary for merging. Different types
of fields are processed in different ways:
- text: all the values are concatenated
- m2m and o2m: those fields aren't processed
- m2o: the first not null value prevails (the other are dropped)
- any other type of field: same as m2o
:param list ids: list of ids of the leads to process
:param list fields: list of leads' fields to process
:return dict data: contains the merged values
"""
opportunities = self.browse(cr, uid, ids, context=context)
def _get_first_not_null(attr):
for opp in opportunities:
if hasattr(opp, attr) and bool(getattr(opp, attr)):
return getattr(opp, attr)
return False
def _get_first_not_null_id(attr):
res = _get_first_not_null(attr)
return res and res.id or False
def _concat_all(attr):
return '\n\n'.join(filter(lambda x: x, [getattr(opp, attr) or '' for opp in opportunities if hasattr(opp, attr)]))
# Process the fields' values
data = {}
for field_name in fields:
field = self._fields.get(field_name)
if field is None:
continue
if field.type in ('many2many', 'one2many'):
continue
elif field.type == 'many2one':
data[field_name] = _get_first_not_null_id(field_name) # !!
elif field.type == 'text':
data[field_name] = _concat_all(field_name) #not lost
else:
data[field_name] = _get_first_not_null(field_name) #not lost
# Define the resulting type ('lead' or 'opportunity')
data['type'] = self._merge_get_result_type(cr, uid, opportunities, context)
return data
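    # Illustrative example of the policy above (hypothetical values): merging two
    # leads whose 'description' texts are "met at fair" and "asked for a quote"
    # yields "met at fair\n\nasked for a quote", while for a many2one such as
    # 'partner_id' only the first non-null value (in the order of the given ids)
    # is kept and the others are dropped.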
def _mail_body(self, cr, uid, lead, fields, title=False, context=None):
body = []
if title:
body.append("%s\n" % (title))
for field_name in fields:
field = self._fields.get(field_name)
if field is None:
continue
value = ''
if field.type == 'selection':
if callable(field.selection):
key = field.selection(self, cr, uid, context=context)
else:
key = field.selection
value = dict(key).get(lead[field_name], lead[field_name])
elif field.type == 'many2one':
if lead[field_name]:
value = lead[field_name].name_get()[0][1]
elif field.type == 'many2many':
if lead[field_name]:
for val in lead[field_name]:
field_value = val.name_get()[0][1]
value += field_value + ","
else:
value = lead[field_name]
body.append("%s: %s" % (field.string, value or ''))
return "<br/>".join(body + ['<br/>'])
def _merge_notify(self, cr, uid, opportunity_id, opportunities, context=None):
"""
Create a message gathering merged leads/opps information.
"""
#TOFIX: mail template should be used instead of fix body, subject text
details = []
result_type = self._merge_get_result_type(cr, uid, opportunities, context)
if result_type == 'lead':
merge_message = _('Merged leads')
else:
merge_message = _('Merged opportunities')
subject = [merge_message]
for opportunity in opportunities:
subject.append(opportunity.name)
title = "%s : %s" % (opportunity.type == 'opportunity' and _('Merged opportunity') or _('Merged lead'), opportunity.name)
fields = list(CRM_LEAD_FIELDS_TO_MERGE)
details.append(self._mail_body(cr, uid, opportunity, fields, title=title, context=context))
# Chatter message's subject
subject = subject[0] + ": " + ", ".join(subject[1:])
details = "\n\n".join(details)
return self.message_post(cr, uid, [opportunity_id], body=details, subject=subject, context=context)
def _merge_opportunity_history(self, cr, uid, opportunity_id, opportunities, context=None):
message = self.pool.get('mail.message')
for opportunity in opportunities:
for history in opportunity.message_ids:
message.write(cr, uid, history.id, {
'res_id': opportunity_id,
'subject' : _("From %s : %s") % (opportunity.name, history.subject)
}, context=context)
return True
def _merge_opportunity_attachments(self, cr, uid, opportunity_id, opportunities, context=None):
attach_obj = self.pool.get('ir.attachment')
# return attachments of opportunity
def _get_attachments(opportunity_id):
attachment_ids = attach_obj.search(cr, uid, [('res_model', '=', self._name), ('res_id', '=', opportunity_id)], context=context)
return attach_obj.browse(cr, uid, attachment_ids, context=context)
first_attachments = _get_attachments(opportunity_id)
#counter of all attachments to move. Used to make sure the name is different for all attachments
count = 1
for opportunity in opportunities:
attachments = _get_attachments(opportunity.id)
for attachment in attachments:
values = {'res_id': opportunity_id,}
for attachment_in_first in first_attachments:
if attachment.name == attachment_in_first.name:
                        values['name'] = "%s (%s)" % (attachment.name, count)
count+=1
attachment.write(values)
return True
def _merge_opportunity_phonecalls(self, cr, uid, opportunity_id, opportunities, context=None):
phonecall_obj = self.pool['crm.phonecall']
for opportunity in opportunities:
for phonecall_id in phonecall_obj.search(cr, uid, [('opportunity_id', '=', opportunity.id)], context=context):
phonecall_obj.write(cr, uid, phonecall_id, {'opportunity_id': opportunity_id}, context=context)
return True
def get_duplicated_leads(self, cr, uid, ids, partner_id, include_lost=False, context=None):
"""
        Search for opportunities that have the same partner and that aren't done or cancelled
"""
lead = self.browse(cr, uid, ids[0], context=context)
email = lead.partner_id and lead.partner_id.email or lead.email_from
return self.pool['crm.lead']._get_duplicated_leads_by_emails(cr, uid, partner_id, email, include_lost=include_lost, context=context)
def _get_duplicated_leads_by_emails(self, cr, uid, partner_id, email, include_lost=False, context=None):
"""
        Search for opportunities that have the same partner and that aren't done or cancelled
"""
final_stage_domain = [('stage_id.probability', '<', 100), '|', ('stage_id.probability', '>', 0), ('stage_id.sequence', '<=', 1)]
partner_match_domain = []
for email in set(email_split(email) + [email]):
partner_match_domain.append(('email_from', '=ilike', email))
if partner_id:
partner_match_domain.append(('partner_id', '=', partner_id))
partner_match_domain = ['|'] * (len(partner_match_domain) - 1) + partner_match_domain
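        # Illustrative example (hypothetical address): with a single e-mail term and
        # a partner term, the loop above yields the prefix-notation domain
        # ['|', ('email_from', '=ilike', 'foo@example.com'), ('partner_id', '=', partner_id)].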
if not partner_match_domain:
return []
domain = partner_match_domain
if not include_lost:
domain += final_stage_domain
return self.search(cr, uid, domain, context=context)
def merge_dependences(self, cr, uid, highest, opportunities, context=None):
self._merge_notify(cr, uid, highest, opportunities, context=context)
self._merge_opportunity_history(cr, uid, highest, opportunities, context=context)
self._merge_opportunity_attachments(cr, uid, highest, opportunities, context=context)
self._merge_opportunity_phonecalls(cr, uid, highest, opportunities, context=context)
def merge_opportunity(self, cr, uid, ids, user_id=False, section_id=False, context=None):
"""
Different cases of merge:
- merge leads together = 1 new lead
- merge at least 1 opp with anything else (lead or opp) = 1 new opp
:param list ids: leads/opportunities ids to merge
:return int id: id of the resulting lead/opp
"""
if context is None:
context = {}
if len(ids) <= 1:
raise osv.except_osv(_('Warning!'), _('Please select more than one element (lead or opportunity) from the list view.'))
opportunities = self.browse(cr, uid, ids, context=context)
sequenced_opps = []
# Sorting the leads/opps according to the confidence level of its stage, which relates to the probability of winning it
# The confidence level increases with the stage sequence, except when the stage probability is 0.0 (Lost cases)
# An Opportunity always has higher confidence level than a lead, unless its stage probability is 0.0
for opportunity in opportunities:
sequence = -1
if opportunity.stage_id and not opportunity.stage_id.fold:
sequence = opportunity.stage_id.sequence
sequenced_opps.append(((int(sequence != -1 and opportunity.type == 'opportunity'), sequence, -opportunity.id), opportunity))
sequenced_opps.sort(reverse=True)
opportunities = map(itemgetter(1), sequenced_opps)
ids = [opportunity.id for opportunity in opportunities]
highest = opportunities[0]
opportunities_rest = opportunities[1:]
tail_opportunities = opportunities_rest
fields = list(CRM_LEAD_FIELDS_TO_MERGE)
merged_data = self._merge_data(cr, uid, ids, highest, fields, context=context)
if user_id:
merged_data['user_id'] = user_id
if section_id:
merged_data['section_id'] = section_id
# Merge notifications about loss of information
opportunities = [highest]
opportunities.extend(opportunities_rest)
self.merge_dependences(cr, uid, highest.id, tail_opportunities, context=context)
# Check if the stage is in the stages of the sales team. If not, assign the stage with the lowest sequence
if merged_data.get('section_id'):
section_stage_ids = self.pool.get('crm.case.stage').search(cr, uid, [('section_ids', 'in', merged_data['section_id']), ('type', '=', merged_data.get('type'))], order='sequence', context=context)
if merged_data.get('stage_id') not in section_stage_ids:
merged_data['stage_id'] = section_stage_ids and section_stage_ids[0] or False
# Write merged data into first opportunity
self.write(cr, uid, [highest.id], merged_data, context=context)
# Delete tail opportunities
# We use the SUPERUSER to avoid access rights issues because as the user had the rights to see the records it should be safe to do so
self.unlink(cr, SUPERUSER_ID, [x.id for x in tail_opportunities], context=context)
return highest.id
def _convert_opportunity_data(self, cr, uid, lead, customer, section_id=False, context=None):
crm_stage = self.pool.get('crm.case.stage')
contact_id = False
if customer:
contact_id = self.pool.get('res.partner').address_get(cr, uid, [customer.id])['default']
if not section_id:
section_id = lead.section_id and lead.section_id.id or False
val = {
'planned_revenue': lead.planned_revenue,
'probability': lead.probability,
'name': lead.name,
'partner_id': customer and customer.id or False,
'type': 'opportunity',
'date_action': fields.datetime.now(),
'date_open': fields.datetime.now(),
'email_from': customer and customer.email or lead.email_from,
'phone': customer and customer.phone or lead.phone,
}
if not lead.stage_id or lead.stage_id.type=='lead':
val['stage_id'] = self.stage_find(cr, uid, [lead], section_id, [('type', 'in', ('opportunity', 'both'))], context=context)
return val
def convert_opportunity(self, cr, uid, ids, partner_id, user_ids=False, section_id=False, context=None):
customer = False
if partner_id:
partner = self.pool.get('res.partner')
customer = partner.browse(cr, uid, partner_id, context=context)
for lead in self.browse(cr, uid, ids, context=context):
# TDE: was if lead.state in ('done', 'cancel'):
if lead.probability == 100 or (lead.probability == 0 and lead.stage_id.fold):
continue
vals = self._convert_opportunity_data(cr, uid, lead, customer, section_id, context=context)
self.write(cr, uid, [lead.id], vals, context=context)
if user_ids or section_id:
self.allocate_salesman(cr, uid, ids, user_ids, section_id, context=context)
return True
def _lead_create_contact(self, cr, uid, lead, name, is_company, parent_id=False, context=None):
partner = self.pool.get('res.partner')
vals = {'name': name,
'user_id': lead.user_id.id,
'comment': lead.description,
'section_id': lead.section_id.id or False,
'parent_id': parent_id,
'phone': lead.phone,
'mobile': lead.mobile,
'email': tools.email_split(lead.email_from) and tools.email_split(lead.email_from)[0] or False,
'fax': lead.fax,
'title': lead.title and lead.title.id or False,
'function': lead.function,
'street': lead.street,
'street2': lead.street2,
'zip': lead.zip,
'city': lead.city,
'country_id': lead.country_id and lead.country_id.id or False,
'state_id': lead.state_id and lead.state_id.id or False,
'is_company': is_company,
'type': 'contact'
}
partner = partner.create(cr, uid, vals, context=context)
return partner
def _create_lead_partner(self, cr, uid, lead, context=None):
partner_id = False
if lead.partner_name and lead.contact_name:
partner_id = self._lead_create_contact(cr, uid, lead, lead.partner_name, True, context=context)
partner_id = self._lead_create_contact(cr, uid, lead, lead.contact_name, False, partner_id, context=context)
elif lead.partner_name and not lead.contact_name:
partner_id = self._lead_create_contact(cr, uid, lead, lead.partner_name, True, context=context)
elif not lead.partner_name and lead.contact_name:
partner_id = self._lead_create_contact(cr, uid, lead, lead.contact_name, False, context=context)
elif lead.email_from and self.pool.get('res.partner')._parse_partner_name(lead.email_from, context=context)[0]:
contact_name = self.pool.get('res.partner')._parse_partner_name(lead.email_from, context=context)[0]
partner_id = self._lead_create_contact(cr, uid, lead, contact_name, False, context=context)
else:
raise osv.except_osv(
_('Warning!'),
_('No customer name defined. Please fill one of the following fields: Company Name, Contact Name or Email ("Name <email@address>")')
)
return partner_id
def handle_partner_assignation(self, cr, uid, ids, action='create', partner_id=False, context=None):
"""
Handle partner assignation during a lead conversion.
if action is 'create', create new partner with contact and assign lead to new partner_id.
otherwise assign lead to the specified partner_id
:param list ids: leads/opportunities ids to process
:param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
:param int partner_id: partner to assign if any
        :return dict: dictionary organized as follows: {lead_id: partner_assigned_id}
"""
#TODO this is a duplication of the handle_partner_assignation method of crm_phonecall
partner_ids = {}
for lead in self.browse(cr, uid, ids, context=context):
# If the action is set to 'create' and no partner_id is set, create a new one
if lead.partner_id:
partner_ids[lead.id] = lead.partner_id.id
continue
if not partner_id and action == 'create':
partner_id = self._create_lead_partner(cr, uid, lead, context)
self.pool['res.partner'].write(cr, uid, partner_id, {'section_id': lead.section_id and lead.section_id.id or False})
if partner_id:
lead.write({'partner_id': partner_id})
partner_ids[lead.id] = partner_id
return partner_ids
def allocate_salesman(self, cr, uid, ids, user_ids=None, team_id=False, context=None):
"""
Assign salesmen and salesteam to a batch of leads. If there are more
leads than salesmen, these salesmen will be assigned in round-robin.
E.g.: 4 salesmen (S1, S2, S3, S4) for 6 leads (L1, L2, ... L6). They
        will be assigned as follows: L1 - S1, L2 - S2, L3 - S3, L4 - S4,
L5 - S1, L6 - S2.
:param list ids: leads/opportunities ids to process
:param list user_ids: salesmen to assign
:param int team_id: salesteam to assign
:return bool
"""
index = 0
for lead_id in ids:
value = {}
if team_id:
value['section_id'] = team_id
if user_ids:
value['user_id'] = user_ids[index]
# Cycle through user_ids
index = (index + 1) % len(user_ids)
if value:
self.write(cr, uid, [lead_id], value, context=context)
return True
def schedule_phonecall(self, cr, uid, ids, schedule_time, call_summary, desc, phone, contact_name, user_id=False, section_id=False, categ_id=False, action='schedule', context=None):
"""
:param string action: ('schedule','Schedule a call'), ('log','Log a call')
"""
phonecall = self.pool.get('crm.phonecall')
model_data = self.pool.get('ir.model.data')
phonecall_dict = {}
if not categ_id:
try:
res_id = model_data._get_id(cr, uid, 'crm', 'categ_phone2')
categ_id = model_data.browse(cr, uid, res_id, context=context).res_id
except ValueError:
pass
for lead in self.browse(cr, uid, ids, context=context):
if not section_id:
section_id = lead.section_id and lead.section_id.id or False
if not user_id:
user_id = lead.user_id and lead.user_id.id or False
vals = {
'name': call_summary,
'opportunity_id': lead.id,
'user_id': user_id or False,
'categ_id': categ_id or False,
'description': desc or '',
'date': schedule_time,
'section_id': section_id or False,
'partner_id': lead.partner_id and lead.partner_id.id or False,
'partner_phone': phone or lead.phone or (lead.partner_id and lead.partner_id.phone or False),
'partner_mobile': lead.partner_id and lead.partner_id.mobile or False,
'priority': lead.priority,
}
new_id = phonecall.create(cr, uid, vals, context=context)
phonecall.write(cr, uid, [new_id], {'state': 'open'}, context=context)
if action == 'log':
phonecall.write(cr, uid, [new_id], {'state': 'done'}, context=context)
phonecall_dict[lead.id] = new_id
self.schedule_phonecall_send_note(cr, uid, [lead.id], new_id, action, context=context)
return phonecall_dict
def redirect_opportunity_view(self, cr, uid, opportunity_id, context=None):
models_data = self.pool.get('ir.model.data')
# Get opportunity views
dummy, form_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_form_view_oppor')
dummy, tree_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_tree_view_oppor')
return {
'name': _('Opportunity'),
'view_type': 'form',
'view_mode': 'tree, form',
'res_model': 'crm.lead',
'domain': [('type', '=', 'opportunity')],
'res_id': int(opportunity_id),
'view_id': False,
'views': [(form_view or False, 'form'),
(tree_view or False, 'tree'), (False, 'kanban'),
(False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
'context': {'default_type': 'opportunity'}
}
def redirect_lead_view(self, cr, uid, lead_id, context=None):
models_data = self.pool.get('ir.model.data')
# Get lead views
dummy, form_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_form_view_leads')
dummy, tree_view = models_data.get_object_reference(cr, uid, 'crm', 'crm_case_tree_view_leads')
return {
'name': _('Lead'),
'view_type': 'form',
'view_mode': 'tree, form',
'res_model': 'crm.lead',
'domain': [('type', '=', 'lead')],
'res_id': int(lead_id),
'view_id': False,
'views': [(form_view or False, 'form'),
(tree_view or False, 'tree'),
(False, 'calendar'), (False, 'graph')],
'type': 'ir.actions.act_window',
}
def action_schedule_meeting(self, cr, uid, ids, context=None):
"""
Open meeting's calendar view to schedule meeting on current opportunity.
:return dict: dictionary value for created Meeting view
"""
lead = self.browse(cr, uid, ids[0], context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
partner_ids = [self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id.id]
if lead.partner_id:
partner_ids.append(lead.partner_id.id)
res['context'] = {
'search_default_opportunity_id': lead.type == 'opportunity' and lead.id or False,
'default_opportunity_id': lead.type == 'opportunity' and lead.id or False,
'default_partner_id': lead.partner_id and lead.partner_id.id or False,
'default_partner_ids': partner_ids,
'default_section_id': lead.section_id and lead.section_id.id or False,
'default_name': lead.name,
}
return res
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('type') and not context.get('default_type'):
context['default_type'] = vals.get('type')
if vals.get('section_id') and not context.get('default_section_id'):
context['default_section_id'] = vals.get('section_id')
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
return super(crm_lead, self).create(cr, uid, vals, context=create_context)
def write(self, cr, uid, ids, vals, context=None):
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
if vals.get('user_id'):
vals['date_open'] = fields.datetime.now()
# stage change with new stage: update probability and date_closed
if vals.get('stage_id') and not vals.get('probability'):
onchange_stage_values = self.onchange_stage_id(cr, uid, ids, vals.get('stage_id'), context=context)['value']
vals.update(onchange_stage_values)
return super(crm_lead, self).write(cr, uid, ids, vals, context=context)
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
if not context:
context = {}
lead = self.browse(cr, uid, id, context=context)
local_context = dict(context)
local_context.setdefault('default_type', lead.type)
local_context.setdefault('default_section_id', lead.section_id.id)
if lead.type == 'opportunity':
default['date_open'] = fields.datetime.now()
else:
default['date_open'] = False
return super(crm_lead, self).copy(cr, uid, id, default, context=local_context)
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_model'] = 'crm.case.section'
context['empty_list_help_id'] = context.get('default_section_id', None)
context['empty_list_help_document_name'] = _("opportunity")
if context.get('default_type') == 'lead':
context['empty_list_help_document_name'] = _("lead")
return super(crm_lead, self).get_empty_list_help(cr, uid, help, context=context)
# ----------------------------------------
# Mail Gateway
# ----------------------------------------
def message_get_reply_to(self, cr, uid, ids, context=None):
""" Override to get the reply_to of the parent project. """
leads = self.browse(cr, SUPERUSER_ID, ids, context=context)
section_ids = set([lead.section_id.id for lead in leads if lead.section_id])
aliases = self.pool['crm.case.section'].message_get_reply_to(cr, uid, list(section_ids), context=context)
return dict((lead.id, aliases.get(lead.section_id and lead.section_id.id or 0, False)) for lead in leads)
def get_formview_id(self, cr, uid, id, context=None):
obj = self.browse(cr, uid, id, context=context)
if obj.type == 'opportunity':
model, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm', 'crm_case_form_view_oppor')
else:
view_id = super(crm_lead, self).get_formview_id(cr, uid, id, context=context)
return view_id
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(crm_lead, self).message_get_suggested_recipients(cr, uid, ids, context=context)
try:
for lead in self.browse(cr, uid, ids, context=context):
if lead.partner_id:
self._message_add_suggested_recipient(cr, uid, recipients, lead, partner=lead.partner_id, reason=_('Customer'))
elif lead.email_from:
self._message_add_suggested_recipient(cr, uid, recipients, lead, email=lead.email_from, reason=_('Customer Email'))
except (osv.except_osv, orm.except_orm): # no read access rights -> just ignore suggested recipients because this imply modifying followers
pass
return recipients
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject') or _("No Subject"),
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
'user_id': False,
}
if msg.get('author_id'):
defaults.update(self.on_change_partner_id(cr, uid, None, msg.get('author_id'), context=context)['value'])
if msg.get('priority') in dict(crm.AVAILABLE_PRIORITIES):
defaults['priority'] = msg.get('priority')
defaults.update(custom_values)
return super(crm_lead, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Overrides mail_thread message_update that is called by the mailgateway
through message_process.
This method updates the document according to the email.
"""
if isinstance(ids, (str, int, long)):
ids = [ids]
if update_vals is None: update_vals = {}
if msg.get('priority') in dict(crm.AVAILABLE_PRIORITIES):
update_vals['priority'] = msg.get('priority')
maps = {
'cost':'planned_cost',
'revenue': 'planned_revenue',
'probability':'probability',
}
for line in msg.get('body', '').split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res and maps.get(res.group(1).lower()):
key = maps.get(res.group(1).lower())
update_vals[key] = res.group(2).lower()
return super(crm_lead, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
# ----------------------------------------
# OpenChatter methods and notifications
# ----------------------------------------
def schedule_phonecall_send_note(self, cr, uid, ids, phonecall_id, action, context=None):
phonecall = self.pool.get('crm.phonecall').browse(cr, uid, [phonecall_id], context=context)[0]
if action == 'log':
message = _('Logged a call for %(date)s. %(description)s')
else:
message = _('Scheduled a call for %(date)s. %(description)s')
phonecall_date = datetime.strptime(phonecall.date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
phonecall_usertime = fields.datetime.context_timestamp(cr, uid, phonecall_date, context=context).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
html_time = "<time datetime='%s+00:00'>%s</time>" % (phonecall.date, phonecall_usertime)
message = message % dict(date=html_time, description=phonecall.description)
return self.message_post(cr, uid, ids, body=message, context=context)
def log_meeting(self, cr, uid, ids, meeting_subject, meeting_date, duration, context=None):
if not duration:
duration = _('unknown')
else:
duration = str(duration)
meet_date = datetime.strptime(meeting_date, tools.DEFAULT_SERVER_DATETIME_FORMAT)
meeting_usertime = fields.datetime.context_timestamp(cr, uid, meet_date, context=context).strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
html_time = "<time datetime='%s+00:00'>%s</time>" % (meeting_date, meeting_usertime)
message = _("Meeting scheduled at '%s'<br> Subject: %s <br> Duration: %s hour(s)") % (html_time, meeting_subject, duration)
return self.message_post(cr, uid, ids, body=message, context=context)
def onchange_state(self, cr, uid, ids, state_id, context=None):
if state_id:
country_id=self.pool.get('res.country.state').browse(cr, uid, state_id, context).country_id.id
return {'value':{'country_id':country_id}}
return {}
def message_partner_info_from_emails(self, cr, uid, id, emails, link_mail=False, context=None):
res = super(crm_lead, self).message_partner_info_from_emails(cr, uid, id, emails, link_mail=link_mail, context=context)
lead = self.browse(cr, uid, id, context=context)
for partner_info in res:
if not partner_info.get('partner_id') and (lead.partner_name or lead.contact_name):
emails = email_re.findall(partner_info['full_name'] or '')
email = emails and emails[0] or ''
if email and lead.email_from and email.lower() == lead.email_from.lower():
partner_info['full_name'] = '%s <%s>' % (lead.partner_name or lead.contact_name, email)
break
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
seaotterman/tensorflow | refs/heads/master | tensorflow/contrib/keras/python/keras/layers/local.py | 27 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Locally-connected layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.engine import Layer
from tensorflow.contrib.keras.python.keras.utils import conv_utils
from tensorflow.python.framework import tensor_shape
class LocallyConnected1D(Layer):
"""Locally-connected layer for 1D inputs.
The `LocallyConnected1D` layer works similarly to
the `Conv1D` layer, except that weights are unshared,
that is, a different set of filters is applied at each different patch
of the input.
Example:
```python
      # apply an unshared weight convolution 1d of length 3 to a sequence with
# 10 timesteps, with 64 output filters
model = Sequential()
model.add(LocallyConnected1D(64, 3, input_shape=(10, 32)))
# now model.output_shape == (None, 8, 64)
# add a new conv1d on top
model.add(LocallyConnected1D(32, 3))
# now model.output_shape == (None, 6, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of a single integer,
specifying the length of the 1D convolution window.
strides: An integer or tuple/list of a single integer,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: Currently only supports `"valid"` (case-insensitive).
`"same"` may be supported in the future.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
3D tensor with shape: `(batch_size, steps, input_dim)`
Output shape:
3D tensor with shape: `(batch_size, new_steps, filters)`
`steps` value might have changed due to padding or strides.
"""
def __init__(self,
filters,
kernel_size,
strides=1,
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(LocallyConnected1D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid':
raise ValueError('Invalid border mode for LocallyConnected1D '
'(only "valid" is supported): ' + padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=3)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
input_dim = input_shape[2]
if input_dim is None:
raise ValueError('Axis 2 of input should be fully-defined. '
'Found shape:', input_shape)
output_length = conv_utils.conv_output_length(
input_shape[1], self.kernel_size[0], self.padding, self.strides[0])
self.kernel_shape = (output_length, self.kernel_size[0] * input_dim,
self.filters)
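    # One private (kernel_size * input_dim) x filters weight matrix per output
    # position, since locally-connected layers do not share weights.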
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(output_length, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.input_spec = InputSpec(ndim=3, axes={2: input_dim})
self.built = True
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
length = conv_utils.conv_output_length(input_shape[1], self.kernel_size[0],
self.padding, self.strides[0])
return tensor_shape.TensorShape([input_shape[0], length, self.filters])
def call(self, inputs):
stride = self.strides[0]
output_length, feature_dim, filters = self.kernel_shape
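    # Gather one flattened input patch per output position; each patch is then
    # multiplied with that position's own kernel slice (weights are unshared).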
xs = []
for i in range(output_length):
slice_length = slice(i * stride, i * stride + self.kernel_size[0])
xs.append(K.reshape(inputs[:, slice_length, :], (1, -1, feature_dim)))
x_aggregate = K.concatenate(xs, axis=0)
# Shape: `(output_length, batch_size, filters)`.
output = K.batch_dot(x_aggregate, self.kernel)
output = K.permute_dimensions(output, (1, 0, 2))
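    # Back to shape `(batch_size, output_length, filters)`.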
if self.use_bias:
output += K.reshape(self.bias, (1, output_length, filters))
if self.activation is not None:
output = self.activation(output)
return output
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(LocallyConnected1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class LocallyConnected2D(Layer):
"""Locally-connected layer for 2D inputs.
The `LocallyConnected2D` layer works similarly
to the `Conv2D` layer, except that weights are unshared,
that is, a different set of filters is applied at each
different patch of the input.
Examples:
```python
      # apply a 3x3 unshared weights convolution with 64 output filters
      # on a 32x32 image with `data_format="channels_last"`:
      model = Sequential()
      model.add(LocallyConnected2D(64, (3, 3), input_shape=(32, 32, 3)))
      # now model.output_shape == (None, 30, 30, 64)
      # notice that this layer will consume (30*30)*(3*3*3*64) + (30*30)*64
      # parameters
# add a 3x3 unshared weights convolution on top, with 32 output filters:
model.add(LocallyConnected2D(32, (3, 3)))
# now model.output_shape == (None, 28, 28, 32)
```
Arguments:
filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
width and height of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the width and height.
Can be a single integer to specify the same value for
all spatial dimensions.
      padding: Currently only supports `"valid"` (case-insensitive).
          `"same"` may be supported in the future.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
          the output of the layer (its "activation").
kernel_constraint: Constraint function applied to the kernel matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(samples, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(LocallyConnected2D, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
if self.padding != 'valid':
raise ValueError('Invalid border mode for LocallyConnected2D '
'(only "valid" is supported): ' + padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = InputSpec(ndim=4)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
input_row, input_col = input_shape[1:-1]
input_filter = input_shape[3]
else:
input_row, input_col = input_shape[2:]
input_filter = input_shape[1]
if input_row is None or input_col is None:
raise ValueError('The spatial dimensions of the inputs to '
' a LocallyConnected2D layer '
'should be fully-defined, but layer received '
'the inputs shape ' + str(input_shape))
output_row = conv_utils.conv_output_length(input_row, self.kernel_size[0],
self.padding, self.strides[0])
output_col = conv_utils.conv_output_length(input_col, self.kernel_size[1],
self.padding, self.strides[1])
self.output_row = output_row
self.output_col = output_col
self.kernel_shape = (
output_row * output_col,
self.kernel_size[0] * self.kernel_size[1] * input_filter, self.filters)
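    # One private weight matrix per spatial output position (row-major over
    # output_row x output_col), since weights are unshared in this layer.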
self.kernel = self.add_weight(
shape=self.kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(output_row, output_col, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
if self.data_format == 'channels_first':
self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
else:
self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
self.built = True
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding, self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding, self.strides[1])
if self.data_format == 'channels_first':
return tensor_shape.TensorShape(
[input_shape[0], self.filters, rows, cols])
elif self.data_format == 'channels_last':
return tensor_shape.TensorShape(
[input_shape[0], rows, cols, self.filters])
def call(self, inputs):
stride_row, stride_col = self.strides
_, feature_dim, filters = self.kernel_shape
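    # One flattened input patch is gathered per spatial output position and
    # multiplied with that position's own kernel slice (unshared weights); the
    # Theano path below uses explicit K.dot calls instead of K.batch_dot.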
if self.data_format == 'channels_first':
if K.backend() == 'theano':
output = []
for i in range(self.output_row):
for j in range(self.output_col):
slice_row = slice(i * stride_row,
i * stride_row + self.kernel_size[0])
slice_col = slice(j * stride_col,
j * stride_col + self.kernel_size[1])
x_flatten = K.reshape(inputs[:, :, slice_row, slice_col],
(1, -1, feature_dim))
output.append(
K.dot(x_flatten, self.kernel[i * self.output_col + j, :, :]))
output = K.concatenate(output, axis=0)
else:
xs = []
for i in range(self.output_row):
for j in range(self.output_col):
slice_row = slice(i * stride_row,
i * stride_row + self.kernel_size[0])
slice_col = slice(j * stride_col,
j * stride_col + self.kernel_size[1])
xs.append(
K.reshape(inputs[:, :, slice_row, slice_col], (1, -1,
feature_dim)))
x_aggregate = K.concatenate(xs, axis=0)
output = K.batch_dot(x_aggregate, self.kernel)
output = K.reshape(output, (self.output_row, self.output_col, -1,
filters))
output = K.permute_dimensions(output, (2, 3, 0, 1))
elif self.data_format == 'channels_last':
xs = []
for i in range(self.output_row):
for j in range(self.output_col):
slice_row = slice(i * stride_row,
i * stride_row + self.kernel_size[0])
slice_col = slice(j * stride_col,
j * stride_col + self.kernel_size[1])
xs.append(
K.reshape(inputs[:, slice_row, slice_col, :], (1, -1, feature_dim
)))
x_aggregate = K.concatenate(xs, axis=0)
output = K.batch_dot(x_aggregate, self.kernel)
output = K.reshape(output, (self.output_row, self.output_col, -1,
filters))
output = K.permute_dimensions(output, (2, 0, 1, 3))
if self.use_bias:
if self.data_format == 'channels_first':
output += K.reshape(self.bias, (1, filters, self.output_row,
self.output_col))
elif self.data_format == 'channels_last':
output += K.reshape(self.bias, (1, self.output_row, self.output_col,
filters))
output = self.activation(output)
return output
def get_config(self):
config = {
'filters':
self.filters,
'kernel_size':
self.kernel_size,
'strides':
self.strides,
'padding':
self.padding,
'data_format':
self.data_format,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint)
}
base_config = super(LocallyConnected2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
tempbottle/kazoo | refs/heads/master | kazoo/tests/test_counter.py | 50 | import uuid
from nose.tools import eq_
from kazoo.testing import KazooTestCase
class KazooCounterTests(KazooTestCase):
def _makeOne(self, **kw):
path = "/" + uuid.uuid4().hex
return self.client.Counter(path, **kw)
def test_int_counter(self):
counter = self._makeOne()
eq_(counter.value, 0)
counter += 2
counter + 1
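        # Counter applies "+" and "-" directly to the stored value, so the
        # bare "counter + 1" above also increments the znode (0 + 2 + 1 == 3).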
eq_(counter.value, 3)
counter -= 3
counter - 1
eq_(counter.value, -1)
def test_float_counter(self):
counter = self._makeOne(default=0.0)
eq_(counter.value, 0.0)
counter += 2.1
eq_(counter.value, 2.1)
counter -= 3.1
eq_(counter.value, -1.0)
def test_errors(self):
counter = self._makeOne()
self.assertRaises(TypeError, counter.__add__, 2.1)
self.assertRaises(TypeError, counter.__add__, b"a")
|
takis/odoo | refs/heads/8.0 | addons/gamification_sale_crm/__init__.py | 396 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
jeasoft/odoo | refs/heads/marcos-8.0 | comunity_modules/hr_payroll_cancel/__init__.py | 4 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2014 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo ([email protected])
############################################################################
# Coded by: Luis Torres ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
|
lmazuel/azure-sdk-for-python | refs/heads/master | azure-mgmt-marketplaceordering/azure/mgmt/marketplaceordering/models/operation_display.py | 2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationDisplay(Model):
"""The object that represents the operation.
:param provider: Service provider: Microsoft.MarketplaceOrdering
:type provider: str
:param resource: Resource on which the operation is performed: Agreement,
virtualmachine, etc.
:type resource: str
:param operation: Operation type: Get Agreement, Sign Agreement, Cancel
Agreement etc.
:type operation: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
}
def __init__(self, provider=None, resource=None, operation=None):
super(OperationDisplay, self).__init__()
self.provider = provider
self.resource = resource
self.operation = operation
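# Illustrative usage only (not part of the generated client code):
#
#   display = OperationDisplay(
#       provider='Microsoft.MarketplaceOrdering',
#       resource='Agreement',
#       operation='Get Agreement')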
|
viswimmer1/PythonGenerator | refs/heads/master | data/python_files/30423090/log.py | 152 | import logging
import sys
from django.core import mail
# Make sure a NullHandler is available
# This was added in Python 2.7/3.2
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Make sure that dictConfig is available
# This was added in Python 2.7/3.2
try:
from logging.config import dictConfig
except ImportError:
from django.utils.dictconfig import dictConfig
if sys.version_info < (2, 5):
class LoggerCompat(object):
def __init__(self, logger):
self._logger = logger
def __getattr__(self, name):
val = getattr(self._logger, name)
if callable(val):
def _wrapper(*args, **kwargs):
# Python 2.4 logging module doesn't support 'extra' parameter to
# methods of Logger
kwargs.pop('extra', None)
return val(*args, **kwargs)
return _wrapper
else:
return val
def getLogger(name=None):
return LoggerCompat(logging.getLogger(name=name))
else:
getLogger = logging.getLogger
# Ensure the creation of the Django logger
# with a null handler. This ensures we don't get any
# 'No handlers could be found for logger "django"' messages
logger = getLogger('django')
if not logger.handlers:
logger.addHandler(NullHandler())
class AdminEmailHandler(logging.Handler):
    """An exception log handler that e-mails log entries to site admins.
    If the request is passed as the first argument to the log record,
    request data will be provided in the email report.
    """
    def __init__(self, include_html=False):
        logging.Handler.__init__(self)
        self.include_html = include_html
def emit(self, record):
import traceback
from django.conf import settings
from django.views.debug import ExceptionReporter
try:
if sys.version_info < (2,5):
# A nasty workaround required because Python 2.4's logging
# module doesn't support passing in extra context.
# For this handler, the only extra data we need is the
# request, and that's in the top stack frame.
request = record.exc_info[2].tb_frame.f_locals['request']
else:
request = record.request
subject = '%s (%s IP): %s' % (
record.levelname,
(request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'),
record.msg
)
request_repr = repr(request)
except:
subject = '%s: %s' % (
record.levelname,
record.msg
)
request = None
request_repr = "Request repr() unavailable"
if record.exc_info:
exc_info = record.exc_info
stack_trace = '\n'.join(traceback.format_exception(*record.exc_info))
else:
exc_info = (None, record.msg, None)
stack_trace = 'No stack trace available'
message = "%s\n\n%s" % (stack_trace, request_repr)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
html_message = self.include_html and reporter.get_traceback_html() or None
mail.mail_admins(subject, message, fail_silently=True,
html_message=html_message)
|
rscnt/django-cms | refs/heads/develop | cms/plugin_rendering.py | 29 | # -*- coding: utf-8 -*-
from django.template import Template, Context
from django.template.loader import render_to_string
from django.utils import six
from django.utils.safestring import mark_safe
from cms.cache.placeholder import get_placeholder_cache, set_placeholder_cache
from cms.models.placeholdermodel import Placeholder
from cms.plugin_processors import (plugin_meta_context_processor, mark_safe_plugin_processor)
from cms.utils import get_language_from_request
from cms.utils.conf import get_cms_setting
from cms.utils.django_load import iterload_objects
from cms.utils.placeholder import get_placeholder_conf, restore_sekizai_context
# these are always called before all other plugin context processors
from sekizai.helpers import Watcher
DEFAULT_PLUGIN_CONTEXT_PROCESSORS = (
plugin_meta_context_processor,
)
# these are always called after all other plugin processors
DEFAULT_PLUGIN_PROCESSORS = (
mark_safe_plugin_processor,
)
class PluginContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in CMS_PLUGIN_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, dict_, instance, placeholder, processors=None, current_app=None):
if current_app:
super(PluginContext, self).__init__(dict_)
else:
super(PluginContext, self).__init__(dict_, current_app=current_app)
if not processors:
processors = []
for processor in DEFAULT_PLUGIN_CONTEXT_PROCESSORS:
self.update(processor(instance, placeholder, self))
for processor in iterload_objects(get_cms_setting('PLUGIN_CONTEXT_PROCESSORS')):
self.update(processor(instance, placeholder, self))
for processor in processors:
self.update(processor(instance, placeholder, self))
def render_plugin(context, instance, placeholder, template, processors=None, current_app=None):
"""
    Renders a single plugin and applies the post processors to its rendered
content.
"""
if current_app:
context['request'].current_app = current_app
if not processors:
processors = []
if isinstance(template, six.string_types):
content = render_to_string(template, context)
elif (isinstance(template, Template) or (hasattr(template, 'template') and
hasattr(template, 'render') and isinstance(template.template, Template))):
content = template.render(context)
else:
content = ''
for processor in iterload_objects(get_cms_setting('PLUGIN_PROCESSORS')):
content = processor(instance, placeholder, content, context)
for processor in processors:
content = processor(instance, placeholder, content, context)
for processor in DEFAULT_PLUGIN_PROCESSORS:
content = processor(instance, placeholder, content, context)
return content
def render_plugins(plugins, context, placeholder, processors=None):
"""
Renders a collection of plugins with the given context, using the appropriate processors
for a given placeholder name, and returns a list containing a "rendered content" string
for each plugin.
This is the main plugin rendering utility function, use this function rather than
Plugin.render_plugin().
"""
out = []
total = len(plugins)
for index, plugin in enumerate(plugins):
plugin._render_meta.total = total
plugin._render_meta.index = index
context.push()
out.append(plugin.render_plugin(context, placeholder, processors=processors))
context.pop()
return out
def render_placeholder(placeholder, context_to_copy, name_fallback="Placeholder",
lang=None, default=None, editable=True, use_cache=True):
"""
Renders plugins for a placeholder on the given page using shallow copies of the
given context, and returns a string containing the rendered output.
Set editable = False to disable front-end editing for this placeholder
during rendering. This is primarily used for the "as" variant of the
render_placeholder tag.
"""
if not placeholder:
return
from cms.utils.plugins import get_plugins
context = context_to_copy.new(context_to_copy)
context.push()
request = context['request']
if not hasattr(request, 'placeholders'):
request.placeholders = []
if placeholder.has_change_permission(request) or not placeholder.cache_placeholder:
request.placeholders.append(placeholder)
if hasattr(placeholder, 'content_cache'):
return mark_safe(placeholder.content_cache)
page = placeholder.page if placeholder else None
    # It's kind of a duplicate of the similar call in `get_plugins`, but it's required
# to have a valid language in this function for `get_fallback_languages` to work
if lang:
save_language = lang
else:
lang = get_language_from_request(request)
save_language = lang
# Prepend frontedit toolbar output if applicable
toolbar = getattr(request, 'toolbar', None)
if getattr(toolbar, 'edit_mode', False) and getattr(placeholder, 'is_editable', True) and editable:
from cms.middleware.toolbar import toolbar_plugin_processor
processors = (toolbar_plugin_processor,)
edit = True
else:
processors = None
edit = False
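    # Outside edit mode, try the placeholder cache first: a hit restores the
    # captured sekizai data and returns the cached HTML without re-rendering.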
if get_cms_setting('PLACEHOLDER_CACHE') and use_cache:
if not edit and placeholder and not hasattr(placeholder, 'cache_checked'):
cached_value = get_placeholder_cache(placeholder, lang)
            if cached_value is not None:
restore_sekizai_context(context, cached_value['sekizai'])
return mark_safe(cached_value['content'])
if page:
template = page.template
else:
template = None
plugins = [plugin for plugin in get_plugins(request, placeholder, template, lang=lang)]
# Add extra context as defined in settings, but do not overwrite existing context variables,
# since settings are general and database/template are specific
# TODO this should actually happen as a plugin context processor, but these currently overwrite
# existing context -- maybe change this order?
slot = getattr(placeholder, 'slot', None)
if slot:
for key, value in get_placeholder_conf("extra_context", slot, template, {}).items():
if key not in context:
context[key] = value
content = []
watcher = Watcher(context)
content.extend(render_plugins(plugins, context, placeholder, processors))
toolbar_content = ''
if edit and editable:
if not hasattr(request.toolbar, 'placeholders'):
request.toolbar.placeholders = {}
if placeholder.pk not in request.toolbar.placeholders:
request.toolbar.placeholders[placeholder.pk] = placeholder
toolbar_content = mark_safe(render_placeholder_toolbar(placeholder, context, name_fallback, save_language))
if content:
content = mark_safe("".join(content))
elif default:
        # should be a nodelist from a template
content = mark_safe(default.render(context_to_copy))
else:
content = ''
context['content'] = content
context['placeholder'] = toolbar_content
context['edit'] = edit
result = render_to_string("cms/toolbar/content.html", context)
changes = watcher.get_changes()
if placeholder and not edit and placeholder.cache_placeholder and get_cms_setting('PLACEHOLDER_CACHE') and use_cache:
set_placeholder_cache(placeholder, lang, content={'content': result, 'sekizai': changes})
context.pop()
return result
def render_placeholder_toolbar(placeholder, context, name_fallback, save_language):
from cms.plugin_pool import plugin_pool
request = context['request']
page = placeholder.page if placeholder else None
if not page:
page = getattr(request, 'current_page', None)
if page:
if name_fallback and not placeholder:
placeholder = Placeholder.objects.create(slot=name_fallback)
page.placeholders.add(placeholder)
placeholder.page = page
if placeholder:
slot = placeholder.slot
else:
slot = None
context.push()
# to restrict child-only plugins from draggables..
context['allowed_plugins'] = [cls.__name__ for cls in plugin_pool.get_all_plugins(slot, page)] + plugin_pool.get_system_plugins()
context['placeholder'] = placeholder
context['language'] = save_language
context['page'] = page
toolbar = render_to_string("cms/toolbar/placeholder.html", context)
context.pop()
return toolbar
|
Manishearth/servo | refs/heads/master | tests/wpt/web-platform-tests/mixed-content/generic/tools/generate.py | 96 | #!/usr/bin/env python
import os, sys, json
from common_paths import *
import spec_validator
import argparse
def expand_pattern(expansion_pattern, test_expansion_schema):
expansion = {}
for artifact_key in expansion_pattern:
artifact_value = expansion_pattern[artifact_key]
if artifact_value == '*':
expansion[artifact_key] = test_expansion_schema[artifact_key]
elif isinstance(artifact_value, list):
expansion[artifact_key] = artifact_value
elif isinstance(artifact_value, dict):
# Flattened expansion.
expansion[artifact_key] = []
values_dict = expand_pattern(artifact_value,
test_expansion_schema[artifact_key])
for sub_key in values_dict.keys():
expansion[artifact_key] += values_dict[sub_key]
else:
expansion[artifact_key] = [artifact_value]
return expansion
def permute_expansion(expansion, artifact_order, selection = {}, artifact_index = 0):
assert isinstance(artifact_order, list), "artifact_order should be a list"
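    # Depth-first recursion: fix one artifact per level and yield a completed
    # selection dict once every artifact in artifact_order has a value.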
if artifact_index >= len(artifact_order):
yield selection
return
artifact_key = artifact_order[artifact_index]
for artifact_value in expansion[artifact_key]:
selection[artifact_key] = artifact_value
for next_selection in permute_expansion(expansion,
artifact_order,
selection,
artifact_index + 1):
yield next_selection
def generate_selection(selection, spec, test_html_template_basename):
selection['spec_name'] = spec['name']
selection['spec_title'] = spec['title']
selection['spec_description'] = spec['description']
selection['spec_specification_url'] = spec['specification_url']
test_filename = test_file_path_pattern % selection
test_headers_filename = test_filename + ".headers"
test_directory = os.path.dirname(test_filename)
full_path = os.path.join(spec_directory, test_directory)
test_html_template = get_template(test_html_template_basename)
test_js_template = get_template("test.js.template")
disclaimer_template = get_template('disclaimer.template')
test_description_template = get_template("test_description.template")
html_template_filename = os.path.join(template_directory,
test_html_template_basename)
generated_disclaimer = disclaimer_template \
% {'generating_script_filename': os.path.relpath(__file__,
test_root_directory),
'html_template_filename': os.path.relpath(html_template_filename,
test_root_directory)}
selection['generated_disclaimer'] = generated_disclaimer.rstrip()
test_description_template = \
test_description_template.rstrip().replace("\n", "\n" + " " * 33)
selection['test_description'] = test_description_template % selection
# Adjust the template for the test invoking JS. Indent it to look nice.
indent = "\n" + " " * 6;
test_js_template = indent + test_js_template.replace("\n", indent);
selection['test_js'] = test_js_template % selection
# Directory for the test files.
try:
os.makedirs(full_path)
except:
pass
# TODO(kristijanburnik): Implement the opt-in-method here.
opt_in_method = selection['opt_in_method']
selection['meta_opt_in'] = ''
if opt_in_method == 'meta-csp':
selection['meta_opt_in'] = '<meta http-equiv="Content-Security-Policy" ' + \
'content="block-all-mixed-content">'
elif opt_in_method == 'http-csp':
opt_in_headers = "Content-Security-Policy: block-all-mixed-content\n"
write_file(test_headers_filename, opt_in_headers)
elif opt_in_method == 'no-opt-in':
pass
else:
raise ValueError("Invalid opt_in_method %s" % opt_in_method)
# Write out the generated HTML file.
write_file(test_filename, test_html_template % selection)
def generate_test_source_files(spec_json, target):
test_expansion_schema = spec_json['test_expansion_schema']
specification = spec_json['specification']
spec_json_js_template = get_template('spec_json.js.template')
write_file(generated_spec_json_filename,
spec_json_js_template % {'spec_json': json.dumps(spec_json)})
# Choose a debug/release template depending on the target.
html_template = "test.%s.html.template" % target
artifact_order = test_expansion_schema.keys() + ['name']
# Create list of excluded tests.
exclusion_dict = {}
for excluded_pattern in spec_json['excluded_tests']:
excluded_expansion = \
expand_pattern(excluded_pattern,
test_expansion_schema)
for excluded_selection in permute_expansion(excluded_expansion, artifact_order):
excluded_selection_path = selection_pattern % excluded_selection
exclusion_dict[excluded_selection_path] = True
for spec in specification:
for expansion_pattern in spec['test_expansion']:
expansion = expand_pattern(expansion_pattern,
test_expansion_schema)
for selection in permute_expansion(expansion, artifact_order):
selection_path = selection_pattern % selection
if not selection_path in exclusion_dict:
generate_selection(selection,
spec,
html_template)
else:
print 'Excluding selection:', selection_path
def main(target, spec_filename):
spec_json = load_spec_json(spec_filename);
spec_validator.assert_valid_spec_json(spec_json)
generate_test_source_files(spec_json, target)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test suite generator utility')
parser.add_argument('-t', '--target', type = str,
choices = ("release", "debug"), default = "release",
help = 'Sets the appropriate template for generating tests')
parser.add_argument('-s', '--spec', type = str, default = None,
help = 'Specify a file used for describing and generating the tests')
# TODO(kristijanburnik): Add option for the spec_json file.
args = parser.parse_args()
main(args.target, args.spec)
|
kaapstorm/i_support | refs/heads/master | src/i_support/pic/tests.py | 24123 | from django.test import TestCase
# Create your tests here.
|
TheTypoMaster/chromium-crosswalk | refs/heads/master | tools/telemetry/telemetry/value/__init__.py | 23 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The Value hierarchy provides a way of representing the values measurements
produce such that they can be merged across runs, grouped by page, and output
to different targets.
The core Value concept provides the basic functionality:
- association with a page, may be none
- naming and units
- importance tracking [whether a value will show up on a waterfall or output
file by default]
- other metadata, such as a description of what was measured
- default conversion to scalar and string
- merging properties
A page may actually run a few times during a single telemetry session.
Downstream consumers of test results typically want to group these runs
together, then compute summary statistics across runs. Value provides the
Merge* family of methods for this kind of aggregation.
"""
import os
from telemetry.core import discover
from telemetry.core import util
# When combining a pair of Values together, it is sometimes ambiguous whether
# the values should be concatenated, or one should be picked as representative.
# The possible merging policies are listed here.
CONCATENATE = 'concatenate'
PICK_FIRST = 'pick-first'
# When converting a Value to its buildbot equivalent, the context in which the
# value is being interpreted actually affects the conversion. This is insane,
# but there you have it. There are three contexts in which Values are converted
# for use by buildbot, represented by these output-intent values.
PER_PAGE_RESULT_OUTPUT_CONTEXT = 'per-page-result-output-context'
COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT = 'merged-pages-result-output-context'
SUMMARY_RESULT_OUTPUT_CONTEXT = 'summary-result-output-context'
class Value(object):
"""An abstract value produced by a telemetry page test.
"""
def __init__(self, page, name, units, important, description,
tir_label):
"""A generic Value object.
Args:
page: A Page object, may be given as None to indicate that the value
represents results for multiple pages.
name: A value name string, may contain a dot. Values from the same test
with the same prefix before the dot may be considered to belong to
the same chart.
units: A units string.
important: Whether the value is "important". Causes the value to appear
by default in downstream UIs.
description: A string explaining in human-understandable terms what this
value represents.
tir_label: The string label of the TimelineInteractionRecord with
which this value is associated.
"""
# TODO(eakuefner): Check story here after migration (crbug.com/442036)
if not isinstance(name, basestring):
raise ValueError('name field of Value must be string.')
if not isinstance(units, basestring):
raise ValueError('units field of Value must be string.')
if not isinstance(important, bool):
raise ValueError('important field of Value must be bool.')
if not ((description is None) or isinstance(description, basestring)):
raise ValueError('description field of Value must absent or string.')
if not ((tir_label is None) or
isinstance(tir_label, basestring)):
raise ValueError('tir_label field of Value must absent or '
'string.')
self.page = page
self.name = name
self.units = units
self.important = important
self.description = description
self.tir_label = tir_label
def __eq__(self, other):
return hash(self) == hash(other)
def __hash__(self):
return hash(str(self))
def IsMergableWith(self, that):
return (self.units == that.units and
type(self) == type(that) and
self.important == that.important)
@classmethod
def MergeLikeValuesFromSamePage(cls, values):
"""Combines the provided list of values into a single compound value.
When a page runs multiple times, it may produce multiple values. This
function is given the same-named values across the multiple runs, and has
the responsibility of producing a single result.
It must return a single Value. If merging does not make sense, the
implementation must pick a representative value from one of the runs.
For instance, it may be given
[ScalarValue(page, 'a', 1), ScalarValue(page, 'a', 2)]
and it might produce
ListOfScalarValues(page, 'a', [1, 2])
"""
raise NotImplementedError()
@classmethod
def MergeLikeValuesFromDifferentPages(cls, values):
"""Combines the provided values into a single compound value.
When a full pageset runs, a single value_name will usually end up getting
collected for multiple pages. For instance, we may end up with
[ScalarValue(page1, 'a', 1),
ScalarValue(page2, 'a', 2)]
This function takes in the values of the same name, but across multiple
pages, and produces a single summary result value. In this instance, it
could produce a ScalarValue(None, 'a', 1.5) to indicate averaging, or even
ListOfScalarValues(None, 'a', [1, 2]) if concatenated output was desired.
Some results are so specific to a page that they make no sense when
aggregated across pages. If merging values of this type across pages is
non-sensical, this method may return None.
"""
raise NotImplementedError()
def _IsImportantGivenOutputIntent(self, output_context):
if output_context == PER_PAGE_RESULT_OUTPUT_CONTEXT:
return False
elif output_context == COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT:
return self.important
elif output_context == SUMMARY_RESULT_OUTPUT_CONTEXT:
return self.important
def GetBuildbotDataType(self, output_context):
"""Returns the buildbot's equivalent data_type.
This should be one of the values accepted by perf_tests_results_helper.py.
"""
raise NotImplementedError()
def GetBuildbotValue(self):
"""Returns the buildbot's equivalent value."""
raise NotImplementedError()
def GetChartAndTraceNameForPerPageResult(self):
chart_name, _ = _ConvertValueNameToChartAndTraceName(self.name)
trace_name = self.page.display_name
return chart_name, trace_name
@property
def name_suffix(self):
"""Returns the string after a . in the name, or the full name otherwise."""
if '.' in self.name:
return self.name.split('.', 1)[1]
else:
return self.name
def GetChartAndTraceNameForComputedSummaryResult(
self, trace_tag):
chart_name, trace_name = (
_ConvertValueNameToChartAndTraceName(self.name))
if trace_tag:
return chart_name, trace_name + trace_tag
else:
return chart_name, trace_name
def GetRepresentativeNumber(self):
"""Gets a single scalar value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
def GetRepresentativeString(self):
"""Gets a string value that best-represents this value.
Returns None if not possible.
"""
raise NotImplementedError()
@staticmethod
def GetJSONTypeName():
"""Gets the typename for serialization to JSON using AsDict."""
raise NotImplementedError()
def AsDict(self):
"""Pre-serializes a value to a dict for output as JSON."""
return self._AsDictImpl()
def _AsDictImpl(self):
d = {
'name': self.name,
'type': self.GetJSONTypeName(),
'units': self.units,
'important': self.important
}
if self.description:
d['description'] = self.description
if self.tir_label:
d['tir_label'] = self.tir_label
if self.page:
d['page_id'] = self.page.id
return d
def AsDictWithoutBaseClassEntries(self):
full_dict = self.AsDict()
base_dict_keys = set(self._AsDictImpl().keys())
# Extracts only entries added by the subclass.
return dict([(k, v) for (k, v) in full_dict.iteritems()
if k not in base_dict_keys])
@staticmethod
def FromDict(value_dict, page_dict):
"""Produces a value from a value dict and a page dict.
Value dicts are produced by serialization to JSON, and must be accompanied
by a dict mapping page IDs to pages, also produced by serialization, in
order to be completely deserialized. If deserializing multiple values, use
ListOfValuesFromListOfDicts instead.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
return Value.ListOfValuesFromListOfDicts([value_dict], page_dict)[0]
@staticmethod
def ListOfValuesFromListOfDicts(value_dicts, page_dict):
"""Takes a list of value dicts to values.
Given a list of value dicts produced by AsDict, this method
deserializes the dicts given a dict mapping page IDs to pages.
This method performs memoization for deserializing a list of values
efficiently, where FromDict is meant to handle one-offs.
values: a list of value dicts produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
value_dir = os.path.dirname(__file__)
value_classes = discover.DiscoverClasses(
value_dir, util.GetTelemetryDir(),
Value, index_by_class_name=True)
value_json_types = dict((value_classes[x].GetJSONTypeName(), x) for x in
value_classes)
values = []
for value_dict in value_dicts:
value_class = value_classes[value_json_types[value_dict['type']]]
assert 'FromDict' in value_class.__dict__, \
'Subclass doesn\'t override FromDict'
values.append(value_class.FromDict(value_dict, page_dict))
return values
@staticmethod
def GetConstructorKwArgs(value_dict, page_dict):
"""Produces constructor arguments from a value dict and a page dict.
Takes a dict parsed from JSON and an index of pages and recovers the
keyword arguments to be passed to the constructor for deserializing the
dict.
value_dict: a dictionary produced by AsDict() on a value subclass.
page_dict: a dictionary mapping IDs to page objects.
"""
d = {
'name': value_dict['name'],
'units': value_dict['units']
}
description = value_dict.get('description', None)
if description:
d['description'] = description
else:
d['description'] = None
page_id = value_dict.get('page_id', None)
if page_id:
d['page'] = page_dict[int(page_id)]
else:
d['page'] = None
d['important'] = False
tir_label = value_dict.get('tir_label', None)
if tir_label:
d['tir_label'] = tir_label
else:
d['tir_label'] = None
return d
def ValueNameFromTraceAndChartName(trace_name, chart_name=None):
"""Mangles a trace name plus optional chart name into a standard string.
A value might just be a bareword name, e.g. numPixels. In that case, its
chart may be None.
But, a value might also be intended for display with other values, in which
case the chart name indicates that grouping. So, you might have
screen.numPixels, screen.resolution, where chartName='screen'.
"""
assert trace_name != 'url', 'The name url cannot be used'
if chart_name:
return '%s.%s' % (chart_name, trace_name)
else:
assert '.' not in trace_name, ('Trace names cannot contain "." with an '
'empty chart_name since this is used to delimit chart_name.trace_name.')
return trace_name
def _ConvertValueNameToChartAndTraceName(value_name):
"""Converts a value_name into the equivalent chart-trace name pair.
Buildbot represents values by the measurement name and an optional trace name,
whereas telemetry represents values with a chart_name.trace_name convention,
where chart_name is optional. This convention is also used by chart_json.
This converts from the telemetry convention to the buildbot convention,
returning a 2-tuple (measurement_name, trace_name).
"""
if '.' in value_name:
return value_name.split('.', 1)
else:
return value_name, value_name
|
jiangjinjinyxt/vnpy | refs/heads/master | vnpy/api/femas/pyscript/generate_md_functions.py | 24 | # encoding: UTF-8
__author__ = 'CHENXY'
from string import join
from femas_struct import structDict
def processCallBack(line):
orignalLine = line
    line = line.replace('\tvirtual void ', '') # remove the unwanted content at the start of the line
    line = line.replace('{};\n', '') # remove the unwanted content at the end of the line
content = line.split('(')
    cbName = content[0] # callback function name
    cbArgs = content[1] # callback function arguments
if cbArgs[-1] == ' ':
cbArgs = cbArgs.replace(') ', '')
else:
cbArgs = cbArgs.replace(')', '')
    cbArgsList = cbArgs.split(', ') # split the argument string into a list
cbArgsTypeList = []
cbArgsValueList = []
    for arg in cbArgsList: # start processing the arguments
content = arg.split(' ')
if len(content) > 1:
            cbArgsTypeList.append(content[0]) # list of argument types
            cbArgsValueList.append(content[1]) # list of argument names
createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine)
createProcess(cbName, cbArgsTypeList, cbArgsValueList)
def createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine):
    # Generate a Task object from the callback and push it onto the queue
funcline = orignalLine.replace('\tvirtual void ', 'void ' + apiName + '::')
funcline = funcline.replace('{};', '')
ftask.write(funcline)
ftask.write('{\n')
ftask.write("\tTask task = Task();\n")
ftask.write("\ttask.task_name = " + cbName.upper() + ";\n")
    # #define constants
global define_count
fdefine.write("#define " + cbName.upper() + ' ' + str(define_count) + '\n')
define_count = define_count + 1
    # switch-case code
fswitch.write("case " + cbName.upper() + ':\n')
fswitch.write("{\n")
fswitch.write("\tthis->" + cbName.replace('On', 'process') + '(task);\n')
fswitch.write("\tbreak;\n")
fswitch.write("}\n")
fswitch.write("\n")
for i, type_ in enumerate(cbArgsTypeList):
if type_ == 'int':
ftask.write("\ttask.task_id = " + cbArgsValueList[i] + ";\n")
elif type_ == 'bool':
ftask.write("\ttask.task_last = " + cbArgsValueList[i] + ";\n")
elif 'RspInfoField' in type_:
ftask.write("\tif (pRspInfo)\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_error = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\tCSecurityFtdcRspInfoField empty_error = CSecurityFtdcRspInfoField();\n")
ftask.write("\t\tmemset(&empty_error, 0, sizeof(empty_error));\n")
ftask.write("\t\ttask.task_error = empty_error;\n")
ftask.write("\t}\n")
else:
ftask.write("\ttask.task_data = " + cbArgsValueList[i] + ";\n")
ftask.write("\tthis->task_queue.push(task);\n")
ftask.write("};\n")
ftask.write("\n")
def createProcess(cbName, cbArgsTypeList, cbArgsValueList):
    # Extract the task from the queue and convert its data into Python dicts
fprocess.write("void " + apiName + '::' + cbName.replace('On', 'process') + '(Task task)' + "\n")
fprocess.write("{\n")
fprocess.write("\tPyLock lock;\n")
onArgsList = []
for i, type_ in enumerate(cbArgsTypeList):
if 'RspInfoField' in type_:
fprocess.write("\t"+ type_ + ' task_error = any_cast<' + type_ + '>(task.task_error);\n')
fprocess.write("\t"+ "dict error;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'error["' + key + '"] = task_error.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('error')
elif type_ in structDict:
fprocess.write("\t"+ type_ + ' task_data = any_cast<' + type_ + '>(task.task_data);\n')
fprocess.write("\t"+ "dict data;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'data["' + key + '"] = task_data.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('data')
elif type_ == 'bool':
onArgsList.append('task.task_last')
elif type_ == 'int':
onArgsList.append('task.task_id')
onArgs = join(onArgsList, ', ')
fprocess.write('\tthis->' + cbName.replace('On', 'on') + '(' + onArgs +');\n')
fprocess.write("};\n")
fprocess.write("\n")
def processFunction(line):
    line = line.replace('\tvirtual int ', '') # remove the unwanted content at the start of the line
    line = line.replace(') = 0;\n', '') # remove the unwanted content at the end of the line
content = line.split('(')
    fcName = content[0] # function name
    fcArgs = content[1] # function arguments
fcArgs = fcArgs.replace(')', '')
    fcArgsList = fcArgs.split(', ') # split the argument string into a list
fcArgsTypeList = []
fcArgsValueList = []
    for arg in fcArgsList: # start processing the arguments
content = arg.split(' ')
if len(content) > 1:
            fcArgsTypeList.append(content[0]) # list of argument types
            fcArgsValueList.append(content[1]) # list of argument names
if len(fcArgsTypeList)>0 and fcArgsTypeList[0] in structDict:
createFunction(fcName, fcArgsTypeList, fcArgsValueList)
def createFunction(fcName, fcArgsTypeList, fcArgsValueList):
type_ = fcArgsTypeList[0]
struct = structDict[type_]
ffunction.write(fcName + '\n')
ffunction.write('{\n')
ffunction.write('\t' + type_ +' myreq = ' + type_ + '();\n')
ffunction.write('\tmemset(&myreq, 0, sizeof(myreq));\n')
for key, value in struct.items():
if value == 'string':
line = '\tgetChar(req, "' + key + '", myreq.' + key + ');\n'
elif value == 'int':
line = '\tgetInt(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'double':
line = '\tgetDouble(req, "' + key + '", &myreq.' + key + ');\n'
ffunction.write(line)
ffunction.write('\tint i = this->api->' + fcName + '(&myreq, nRequestID);\n')
ffunction.write('\treturn i;\n')
ffunction.write('};\n')
ffunction.write('\n')
#########################################################
apiName = 'MdApi'
fcpp = open('USTPFtdcMduserApi.h', 'r')
ftask = open('femas_md_task.cpp', 'w')
fprocess = open('femas_md_process.cpp', 'w')
ffunction = open('femas_md_function.cpp', 'w')
fdefine = open('femas_md_define.cpp', 'w')
fswitch = open('femas_md_switch.cpp', 'w')
define_count = 1
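# Scan the C++ header line by line: "virtual void On..." declarations are
# callbacks and "virtual int ..." declarations are request functions; each is
# expanded into the generated C++ fragments opened above.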
for line in fcpp:
if "\tvirtual void On" in line:
processCallBack(line)
elif "\tvirtual int" in line:
processFunction(line)
fcpp.close()
ftask.close()
fprocess.close()
ffunction.close()
fswitch.close()
fdefine.close() |
longde123/MultiversePlatform | refs/heads/master | server/src/multiverse/simpleclient/testclientgroups/group14.py | 1 | #
# The Multiverse Platform is made available under the MIT License.
#
# Copyright (c) 2012 The Multiverse Foundation
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
#
from multiverse.mars import *
from multiverse.mars.objects import *
from multiverse.mars.util import *
from multiverse.server.math import *
from multiverse.server.events import *
from multiverse.server.objects import *
from multiverse.server.engine import *
from multiverse.server.util import *
from multiverse.msgsys import *
from multiverse.simpleclient import *
from java.lang import *
# PlayerClient instance
Log.debug("playerclient.py starting PlayerThread");
playerClient = PlayerClient("--zero_y --position (2000505,81334,271534) --square_side 100000")
Log.debug("completed playerclient.py")
|
basicthinker/Sexain-MemController | refs/heads/master | gem5-stable/configs/splash2/cluster.py | 11 | # Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
# Simple test script
#
# "m5 test.py"
import os
import optparse
import sys
import m5
from m5.objects import *
m5.util.addToPath('../common')
# --------------------
# Define Command Line Options
# ====================
parser = optparse.OptionParser()
parser.add_option("-d", "--detailed", action="store_true")
parser.add_option("-t", "--timing", action="store_true")
parser.add_option("-m", "--maxtick", type="int")
parser.add_option("-c", "--numclusters",
help="Number of clusters", type="int")
parser.add_option("-n", "--numcpus",
help="Number of cpus in total", type="int")
parser.add_option("-f", "--frequency",
default = "1GHz",
help="Frequency of each CPU")
parser.add_option("--l1size",
default = "32kB")
parser.add_option("--l1latency",
default = 1)
parser.add_option("--l2size",
default = "256kB")
parser.add_option("--l2latency",
default = 10)
parser.add_option("--rootdir",
help="ROot directory of Splash2",
default="/dist/splash2/codes/")
parser.add_option("-b", "--benchmark",
help="Splash 2 benchmark to run")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
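# Example invocation (illustrative values; the exact m5/gem5 wrapper depends on your build):
#   m5 cluster.py --timing -n 8 -c 2 --benchmark=FFT --rootdir=/dist/splash2/codes/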
# --------------------
# Define Splash2 Benchmarks
# ====================
class Cholesky(LiveProcess):
executable = options.rootdir + '/kernels/cholesky/CHOLESKY'
cmd = 'CHOLESKY -p' + str(options.numcpus) + ' '\
+ options.rootdir + '/kernels/cholesky/inputs/tk23.O'
class FFT(LiveProcess):
executable = options.rootdir + 'kernels/fft/FFT'
cmd = 'FFT -p' + str(options.numcpus) + ' -m18'
class LU_contig(LiveProcess):
executable = options.rootdir + 'kernels/lu/contiguous_blocks/LU'
cmd = 'LU -p' + str(options.numcpus)
class LU_noncontig(LiveProcess):
executable = options.rootdir + 'kernels/lu/non_contiguous_blocks/LU'
cmd = 'LU -p' + str(options.numcpus)
class Radix(LiveProcess):
executable = options.rootdir + 'kernels/radix/RADIX'
cmd = 'RADIX -n524288 -p' + str(options.numcpus)
class Barnes(LiveProcess):
executable = options.rootdir + 'apps/barnes/BARNES'
cmd = 'BARNES'
input = options.rootdir + 'apps/barnes/input.p' + str(options.numcpus)
class FMM(LiveProcess):
executable = options.rootdir + 'apps/fmm/FMM'
cmd = 'FMM'
input = options.rootdir + 'apps/fmm/inputs/input.2048.p' + str(options.numcpus)
class Ocean_contig(LiveProcess):
executable = options.rootdir + 'apps/ocean/contiguous_partitions/OCEAN'
cmd = 'OCEAN -p' + str(options.numcpus)
class Ocean_noncontig(LiveProcess):
executable = options.rootdir + 'apps/ocean/non_contiguous_partitions/OCEAN'
cmd = 'OCEAN -p' + str(options.numcpus)
class Raytrace(LiveProcess):
executable = options.rootdir + 'apps/raytrace/RAYTRACE'
cmd = 'RAYTRACE -p' + str(options.numcpus) + ' ' \
+ options.rootdir + 'apps/raytrace/inputs/teapot.env'
class Water_nsquared(LiveProcess):
executable = options.rootdir + 'apps/water-nsquared/WATER-NSQUARED'
cmd = 'WATER-NSQUARED'
input = options.rootdir + 'apps/water-nsquared/input.p' + str(options.numcpus)
class Water_spatial(LiveProcess):
executable = options.rootdir + 'apps/water-spatial/WATER-SPATIAL'
cmd = 'WATER-SPATIAL'
input = options.rootdir + 'apps/water-spatial/input.p' + str(options.numcpus)
# --------------------
# Base L1 Cache Definition
# ====================
class L1(BaseCache):
latency = options.l1latency
mshrs = 12
tgts_per_mshr = 8
# ----------------------
# Base L2 Cache Definition
# ----------------------
class L2(BaseCache):
latency = options.l2latency
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
# ----------------------
# Define the clusters with their cpus
# ----------------------
class Cluster:
pass
cpusPerCluster = options.numcpus/options.numclusters
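# Note: under Python 2 this is integer (floor) division; numcpus is assumed to be
# an exact multiple of numclusters.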
busFrequency = Frequency(options.frequency)
busFrequency *= cpusPerCluster
all_cpus = []
all_l1s = []
all_l1buses = []
if options.timing:
clusters = [ Cluster() for i in xrange(options.numclusters)]
for j in xrange(options.numclusters):
clusters[j].id = j
for cluster in clusters:
cluster.clusterbus = CoherentBus(clock=busFrequency)
all_l1buses += [cluster.clusterbus]
cluster.cpus = [TimingSimpleCPU(cpu_id = i + cluster.id,
clock=options.frequency)
for i in xrange(cpusPerCluster)]
all_cpus += cluster.cpus
cluster.l1 = L1(size=options.l1size, assoc = 4)
all_l1s += [cluster.l1]
elif options.detailed:
clusters = [ Cluster() for i in xrange(options.numclusters)]
for j in xrange(options.numclusters):
clusters[j].id = j
for cluster in clusters:
cluster.clusterbus = CoherentBus(clock=busFrequency)
all_l1buses += [cluster.clusterbus]
cluster.cpus = [DerivO3CPU(cpu_id = i + cluster.id,
clock=options.frequency)
for i in xrange(cpusPerCluster)]
all_cpus += cluster.cpus
cluster.l1 = L1(size=options.l1size, assoc = 4)
all_l1s += [cluster.l1]
else:
clusters = [ Cluster() for i in xrange(options.numclusters)]
for j in xrange(options.numclusters):
clusters[j].id = j
for cluster in clusters:
cluster.clusterbus = CoherentBus(clock=busFrequency)
all_l1buses += [cluster.clusterbus]
cluster.cpus = [AtomicSimpleCPU(cpu_id = i + cluster.id,
clock=options.frequency)
for i in xrange(cpusPerCluster)]
all_cpus += cluster.cpus
cluster.l1 = L1(size=options.l1size, assoc = 4)
all_l1s += [cluster.l1]
# ----------------------
# Create a system, and add system wide objects
# ----------------------
system = System(cpu = all_cpus, l1_ = all_l1s, l1bus_ = all_l1buses,
physmem = SimpleMemory(),
membus = CoherentBus(clock = busFrequency))
system.clock = '1GHz'
system.toL2bus = CoherentBus(clock = busFrequency)
system.l2 = L2(size = options.l2size, assoc = 8)
# ----------------------
# Connect the L2 cache and memory together
# ----------------------
system.physmem.port = system.membus.master
system.l2.cpu_side = system.toL2bus.slave
system.l2.mem_side = system.membus.master
# ----------------------
# Connect the L2 cache and clusters together
# ----------------------
for cluster in clusters:
cluster.l1.cpu_side = cluster.clusterbus.master
cluster.l1.mem_side = system.toL2bus.slave
for cpu in cluster.cpus:
cpu.icache_port = cluster.clusterbus.slave
cpu.dcache_port = cluster.clusterbus.slave
# ----------------------
# Define the root
# ----------------------
root = Root(full_system = False, system = system)
# --------------------
# Pick the correct Splash2 Benchmarks
# ====================
if options.benchmark == 'Cholesky':
root.workload = Cholesky()
elif options.benchmark == 'FFT':
root.workload = FFT()
elif options.benchmark == 'LUContig':
root.workload = LU_contig()
elif options.benchmark == 'LUNoncontig':
root.workload = LU_noncontig()
elif options.benchmark == 'Radix':
root.workload = Radix()
elif options.benchmark == 'Barnes':
root.workload = Barnes()
elif options.benchmark == 'FMM':
root.workload = FMM()
elif options.benchmark == 'OceanContig':
root.workload = Ocean_contig()
elif options.benchmark == 'OceanNoncontig':
root.workload = Ocean_noncontig()
elif options.benchmark == 'Raytrace':
root.workload = Raytrace()
elif options.benchmark == 'WaterNSquared':
root.workload = Water_nsquared()
elif options.benchmark == 'WaterSpatial':
root.workload = Water_spatial()
else:
m5.util.panic("""
The --benchmark environment variable was set to something improper.
Use Cholesky, FFT, LUContig, LUNoncontig, Radix, Barnes, FMM, OceanContig,
OceanNoncontig, Raytrace, WaterNSquared, or WaterSpatial
""")
# --------------------
# Assign the workload to the cpus
# ====================
for cluster in clusters:
for cpu in cluster.cpus:
cpu.workload = root.workload
# ----------------------
# Run the simulation
# ----------------------
if options.timing or options.detailed:
root.system.mem_mode = 'timing'
# instantiate configuration
m5.instantiate()
# simulate until program terminates
if options.maxtick:
exit_event = m5.simulate(options.maxtick)
else:
exit_event = m5.simulate(m5.MaxTick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
|
rubenvereecken/pokemongo-api | refs/heads/master | POGOProtos/Settings/Master/Item/ReviveAttributes_pb2.py | 16 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/Master/Item/ReviveAttributes.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/Master/Item/ReviveAttributes.proto',
package='POGOProtos.Settings.Master.Item',
syntax='proto3',
serialized_pb=_b('\n6POGOProtos/Settings/Master/Item/ReviveAttributes.proto\x12\x1fPOGOProtos.Settings.Master.Item\"\'\n\x10ReviveAttributes\x12\x13\n\x0bsta_percent\x18\x01 \x01(\x02\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REVIVEATTRIBUTES = _descriptor.Descriptor(
name='ReviveAttributes',
full_name='POGOProtos.Settings.Master.Item.ReviveAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sta_percent', full_name='POGOProtos.Settings.Master.Item.ReviveAttributes.sta_percent', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=91,
serialized_end=130,
)
DESCRIPTOR.message_types_by_name['ReviveAttributes'] = _REVIVEATTRIBUTES
ReviveAttributes = _reflection.GeneratedProtocolMessageType('ReviveAttributes', (_message.Message,), dict(
DESCRIPTOR = _REVIVEATTRIBUTES,
__module__ = 'POGOProtos.Settings.Master.Item.ReviveAttributes_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.Item.ReviveAttributes)
))
_sym_db.RegisterMessage(ReviveAttributes)
# @@protoc_insertion_point(module_scope)
|
lunafeng/django | refs/heads/master | tests/template_tests/filter_tests/test_truncatewords.py | 215 | from django.template.defaultfilters import truncatewords
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class TruncatewordsTests(SimpleTestCase):
@setup({'truncatewords01':
'{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}'})
def test_truncatewords01(self):
output = self.engine.render_to_string('truncatewords01', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
self.assertEqual(output, 'alpha & ... alpha & ...')
@setup({'truncatewords02': '{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}'})
def test_truncatewords02(self):
output = self.engine.render_to_string('truncatewords02', {'a': 'alpha & bravo', 'b': mark_safe('alpha & bravo')})
self.assertEqual(output, 'alpha & ... alpha & ...')
class FunctionTests(SimpleTestCase):
def test_truncate(self):
self.assertEqual(truncatewords('A sentence with a few words in it', 1), 'A ...')
def test_truncate2(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 5),
'A sentence with a few ...',
)
def test_overtruncate(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 100),
'A sentence with a few words in it',
)
def test_invalid_number(self):
self.assertEqual(
truncatewords('A sentence with a few words in it', 'not a number'),
'A sentence with a few words in it',
)
def test_non_string_input(self):
self.assertEqual(truncatewords(123, 2), '123')
|
stephaneAG/PengPod700 | refs/heads/master | QtEsrc/backup_qt/qt-everywhere-opensource-src-4.8.5/src/3rdparty/webkit/Source/ThirdParty/gyp/test/actions/src/subdir2/make-file.py | 973 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
contents = "Hello from make-file.py\n"
open(sys.argv[1], 'wb').write(contents)
|
yceruto/django | refs/heads/master | django/contrib/auth/models.py | 15 | from __future__ import unicode_literals
from django.core.mail import send_mail
from django.core import validators
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils.crypto import get_random_string
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, make_password, is_password_usable)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import python_2_unicode_compatible
def update_last_login(sender, user, **kwargs):
"""
A signal receiver which updates the last_login date for
the user logging in.
"""
user.last_login = timezone.now()
user.save(update_fields=['last_login'])
user_logged_in.connect(update_last_login)
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label,
model),
)
@python_2_unicode_compatible
class Permission(models.Model):
"""
The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add" form
and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have a
certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically
created for each Django model.
"""
name = models.CharField(_('name'), max_length=50)
content_type = models.ForeignKey(ContentType)
codename = models.CharField(_('codename'), max_length=100)
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
unique_together = (('content_type', 'codename'),)
ordering = ('content_type__app_label', 'content_type__model',
'codename')
def __str__(self):
return "%s | %s | %s" % (
six.text_type(self.content_type.app_label),
six.text_type(self.content_type),
six.text_type(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
"""
The manager for the auth's Group model.
"""
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
"""
Groups are a generic way of categorizing users to apply permissions, or
some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group Site editors has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only email
messages.
"""
name = models.CharField(_('name'), max_length=80, unique=True)
permissions = models.ManyToManyField(Permission,
verbose_name=_('permissions'), blank=True)
objects = GroupManager()
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __str__(self):
return self.name
def natural_key(self):
return (self.name,)
class BaseUserManager(models.Manager):
@classmethod
def normalize_email(cls, email):
"""
Normalize the address by lowercasing the domain part of the email
address.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generates a random password with the given length and given
allowed_chars. Note that the default value of allowed_chars does not
have "I" or "O" or letters and digits that look similar -- just to
avoid confusion.
"""
return get_random_string(length, allowed_chars)
def get_by_natural_key(self, username):
return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):
def _create_user(self, username, email, password,
is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
"""
now = timezone.now()
if not username:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=is_staff, is_active=True,
is_superuser=is_superuser, last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, username, email=None, password=None, **extra_fields):
return self._create_user(username, email, password, False, False,
**extra_fields)
def create_superuser(self, username, email, password, **extra_fields):
return self._create_user(username, email, password, True, True,
**extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
password = models.CharField(_('password'), max_length=128)
last_login = models.DateTimeField(_('last login'), default=timezone.now)
is_active = True
REQUIRED_FIELDS = []
class Meta:
abstract = True
def get_username(self):
"Return the identifying username for this User"
return getattr(self, self.USERNAME_FIELD)
def __str__(self):
return self.get_username()
def natural_key(self):
return (self.get_username(),)
def is_anonymous(self):
"""
Always returns False. This is a way of comparing User objects to
anonymous users.
"""
return False
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def set_password(self, raw_password):
self.password = make_password(raw_password)
def check_password(self, raw_password):
"""
Returns a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
self.save(update_fields=["password"])
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
# Sets a value that will never be a valid hash
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
def get_full_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_full_name() method')
def get_short_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_short_name() method.')
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
permissions.update(backend.get_all_permissions(user, obj))
return permissions
def _user_has_perm(user, perm, obj):
for backend in auth.get_backends():
if hasattr(backend, "has_perm"):
if backend.has_perm(user, perm, obj):
return True
return False
def _user_has_module_perms(user, app_label):
for backend in auth.get_backends():
if hasattr(backend, "has_module_perms"):
if backend.has_module_perms(user, app_label):
return True
return False
class PermissionsMixin(models.Model):
"""
A mixin class that adds the fields and methods necessary to support
Django's Group and Permission model using the ModelBackend.
"""
is_superuser = models.BooleanField(_('superuser status'), default=False,
help_text=_('Designates that this user has all permissions without '
'explicitly assigning them.'))
groups = models.ManyToManyField(Group, verbose_name=_('groups'),
blank=True, help_text=_('The groups this user belongs to. A user will '
'get all permissions granted to each of '
'his/her group.'),
related_name="user_set", related_query_name="user")
user_permissions = models.ManyToManyField(Permission,
verbose_name=_('user permissions'), blank=True,
help_text=_('Specific permissions for this user.'),
related_name="user_set", related_query_name="user")
class Meta:
abstract = True
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_perms(self, perm_list, obj=None):
"""
Returns True if the user has each of the specified permissions. If
object is passed, it checks if the user has all required perms for this
object.
"""
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, digits and '
'@/./+/-/_ only.'),
validators=[
validators.RegexValidator(r'^[\w.@+-]+$', _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = UserManager()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
abstract = True
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
"""
Users within the Django authentication system are represented by this
model.
Username, password and email are required. Other fields are optional.
"""
class Meta(AbstractUser.Meta):
swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
id = None
pk = None
username = ''
is_staff = False
is_active = False
is_superuser = False
_groups = EmptyManager(Group)
_user_permissions = EmptyManager(Permission)
def __init__(self):
pass
def __str__(self):
return 'AnonymousUser'
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return 1 # instances always return the same hash value
def save(self):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def delete(self):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def set_password(self, raw_password):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def check_password(self, raw_password):
raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
def _get_groups(self):
return self._groups
groups = property(_get_groups)
def _get_user_permissions(self):
return self._user_permissions
user_permissions = property(_get_user_permissions)
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj=obj)
def has_perm(self, perm, obj=None):
return _user_has_perm(self, perm, obj=obj)
def has_perms(self, perm_list, obj=None):
for perm in perm_list:
if not self.has_perm(perm, obj):
return False
return True
def has_module_perms(self, module):
return _user_has_module_perms(self, module)
def is_anonymous(self):
return True
def is_authenticated(self):
return False
|